language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/scalar_pointwise.py | {
"start": 2167,
"end": 2331
} | class ____(ScalarPointwiseOperator):
"""Operator for scalar subtraction."""
def __init__(self):
super().__init__("scalar_sub", "-")
| ScalarSubOperator |
python | kamyu104__LeetCode-Solutions | Python/insufficient-nodes-in-root-to-leaf-paths.py | {
"start": 191,
"end": 736
} | class ____(object):
def sufficientSubset(self, root, limit):
"""
:type root: TreeNode
:type limit: int
:rtype: TreeNode
"""
if not root:
return None
if not root.left and not root.right:
return None if root.val < limit else root
root.left = self.sufficientSubset(root.left, limit-root.val)
root.right = self.sufficientSubset(root.right, limit-root.val)
if not root.left and not root.right:
return None
return root
| Solution |
python | google__pytype | pytype/tests/test_namedtuple2.py | {
"start": 1691,
"end": 3056
} | class ____(test_base.BaseTest):
"""Tests for collections.namedtuple in Python 3."""
def test_bad_call(self):
"""The last two arguments are kwonly in 3.6."""
self.InferWithErrors("""
import collections
collections.namedtuple() # missing-parameter
collections.namedtuple("_") # missing-parameter
collections.namedtuple("_", "", True) # wrong-arg-count
collections.namedtuple("_", "", True, True) # wrong-arg-count
collections.namedtuple("_", "", True, True, True) # wrong-arg-count
""")
def test_nested_namedtuple(self):
self.Check("""
from typing import NamedTuple
class Bar:
class Foo(NamedTuple):
x: int
foo = Foo(x=0)
""")
def test_namedtuple_defaults(self):
self.Check("""
import collections
X = collections.namedtuple('X', ['a', 'b'], defaults=[0])
X('a')
X('a', 'b')
""")
def test_variable_annotations(self):
ty = self.Infer("""
import collections
class X(collections.namedtuple('X', ['a', 'b'])):
a: int
b: str
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import NamedTuple
class X(NamedTuple):
a: int
b: str
""",
)
if __name__ == "__main__":
test_base.main()
| NamedtupleTestsPy3 |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 16905,
"end": 17491
} | class ____(LocalizableStreamlitException):
"""Exception raised when an invalid key is provided in the default dict."""
def __init__(self, state_key: str, available_keys: list[str]) -> None:
super().__init__(
"Key `'{state_key}'` in `default` is not a valid state name. "
"Valid state names are those with corresponding `on_{{state_name}}_change` "
"callbacks. Available state names: `{available_keys}`",
state_key=state_key,
available_keys=available_keys or "none",
)
| BidiComponentInvalidDefaultKeyError |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 5660,
"end": 6287
} | class ____(HasOptions, Generic[R, T0, T1, T2, T3]):
def __init__(self, function: Callable[[T0, T1, T2, T3], R]) -> None:
pass
def remote(
self,
__arg0: "Union[T0, ObjectRef[T0]]",
__arg1: "Union[T1, ObjectRef[T1]]",
__arg2: "Union[T2, ObjectRef[T2]]",
__arg3: "Union[T3, ObjectRef[T3]]",
) -> "ObjectRef[R]":
...
def bind(
self,
__arg0: "Union[T0, DAGNode[T0]]",
__arg1: "Union[T1, DAGNode[T1]]",
__arg2: "Union[T2, DAGNode[T2]]",
__arg3: "Union[T3, DAGNode[T3]]",
) -> "DAGNode[R]":
...
| RemoteFunction3 |
python | numpy__numpy | numpy/matrixlib/tests/test_matrix_linalg.py | {
"start": 1945,
"end": 2024
} | class ____(_TestNorm2DMatrix, _TestNormDoubleBase):
pass
| TestNormDoubleMatrix |
python | ray-project__ray | python/ray/train/v2/_internal/execution/failure_handling/failure_policy.py | {
"start": 235,
"end": 779
} | class ____(abc.ABC):
"""A policy that determines how to handle user and system failures.
FailurePolicy will handle the controller failure and worker errors during training.
This can be used to implement fault tolerance and error recovery.
"""
def __init__(self, failure_config: FailureConfig):
self.failure_config = failure_config
@abc.abstractmethod
def make_decision(
self,
training_failed_error: TrainingFailedError,
) -> FailureDecision:
raise NotImplementedError
| FailurePolicy |
python | euske__pdfminer | pdfminer/cmapdb.py | {
"start": 683,
"end": 738
} | class ____(Exception):
pass
## CMapBase
##
| CMapError |
python | keras-team__keras | keras/src/activations/activations.py | {
"start": 1783,
"end": 15880
} | class ____(ops.Operation):
def __init__(
self, negative_slope=0.0, max_value=None, threshold=0.0, name=None
):
super().__init__(name=name)
self.negative_slope = negative_slope
self.max_value = max_value
self.threshold = threshold
def call(self, x):
return self.static_call(
x,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def compute_output_spec(self, x):
return backend.KerasTensor(x.shape, x.dtype)
@staticmethod
def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
x = backend.convert_to_tensor(x)
if negative_slope != 0.0:
if max_value is None and threshold == 0:
return backend.nn.leaky_relu(x, negative_slope=negative_slope)
if threshold != 0:
negative_part = backend.nn.relu(-x + threshold)
else:
negative_part = backend.nn.relu(-x)
else:
negative_part = 1
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
threshold = ops.cast(threshold, dtype=x.dtype)
x = x * backend.cast(
backend.numpy.greater(x, threshold), dtype=x.dtype
)
elif max_value == 6:
# if no threshold, then can use nn.relu6 native op for performance
x = backend.nn.relu6(x)
clip_max = False
else:
x = backend.nn.relu(x)
if clip_max:
min_value = ops.cast(0.0, dtype=x.dtype)
max_value = ops.cast(max_value, dtype=x.dtype)
x = backend.numpy.clip(x, min_value, max_value)
if negative_slope != 0.0:
x -= negative_slope * negative_part
return x
@keras_export("keras.activations.leaky_relu")
def leaky_relu(x, negative_slope=0.2):
"""Leaky relu activation function.
Args:
x: Input tensor.
negative_slope: A `float` that controls the slope
for values lower than the threshold.
"""
return ops.leaky_relu(x, negative_slope=negative_slope)
@keras_export("keras.activations.relu6")
def relu6(x):
"""Relu6 activation function.
It's the ReLU function, but truncated to a maximum value of 6.
Args:
x: Input tensor.
"""
return ops.relu6(x)
@keras_export("keras.activations.softmax")
def softmax(x, axis=-1):
"""Softmax converts a vector of values to a probability distribution.
The elements of the output vector are in range `[0, 1]` and sum to 1.
Each input vector is handled independently.
The `axis` argument sets which axis of the input the function
is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / sum(exp(x))`.
The input values in are the log-odds of the resulting probability.
Args:
x: Input tensor.
axis: Integer, axis along which the softmax is applied.
"""
output = ops.softmax(x, axis=axis)
# Cache the logits to use for crossentropy loss.
try:
output._keras_logits = x
except AttributeError:
# We're dealing with a C-type.
pass
return output
@keras_export("keras.activations.elu")
def elu(x, alpha=1.0):
"""Exponential Linear Unit.
The exponential linear unit (ELU) with `alpha > 0` is defined as:
- `x` if `x > 0`
- alpha * `exp(x) - 1` if `x < 0`
ELUs have negative values which pushes the mean of the activations
closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.
Args:
x: Input tensor.
alpha: A scalar, slope of positive section. Defaults to `1.0`.
Reference:
- [Clevert et al., 2016](https://arxiv.org/abs/1511.07289)
"""
return ops.elu(x, alpha=alpha)
@keras_export("keras.activations.selu")
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `scale * x` if `x > 0`
- `scale * alpha * (exp(x) - 1)` if `x < 0`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `keras.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Args:
x: Input tensor.
Notes:
- To be used together with the
`keras.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`keras.layers.AlphaDropout` (rather than regular dropout).
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return ops.selu(x)
@keras_export("keras.activations.softplus")
def softplus(x):
"""Softplus activation function.
It is defined as: `softplus(x) = log(exp(x) + 1)`.
Args:
x: Input tensor.
"""
return ops.softplus(x)
@keras_export("keras.activations.softsign")
def softsign(x):
"""Softsign activation function.
Softsign is defined as: `softsign(x) = x / (abs(x) + 1)`.
Args:
x: Input tensor.
"""
return ops.softsign(x)
@keras_export("keras.activations.soft_shrink")
def soft_shrink(x, threshold=0.5):
"""Soft Shrink activation function.
It is defined as:
`soft_shrink(x) = x - threshold` if `x > threshold`,
`soft_shrink(x) = x + threshold` if `x < -threshold`,
`soft_shrink(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
"""
return ops.soft_shrink(x, threshold=threshold)
@keras_export("keras.activations.sparse_plus")
def sparse_plus(x):
"""SparsePlus activation function.
SparsePlus is defined as:
`sparse_plus(x) = 0` for `x <= -1`.
`sparse_plus(x) = (1/4) * (x + 1)^2` for `-1 < x < 1`.
`sparse_plus(x) = x` for `x >= 1`.
Args:
x: Input tensor.
"""
return ops.sparse_plus(x)
@keras_export(["keras.activations.silu", "keras.activations.swish"])
def silu(x):
"""Swish (or Silu) activation function.
It is defined as: `swish(x) = x * sigmoid(x)`.
The Swish (or Silu) activation function is a smooth,
non-monotonic function that is unbounded above and
bounded below.
Args:
x: Input tensor.
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return ops.silu(x)
@keras_export("keras.activations.squareplus")
def squareplus(x, b=4):
"""Squareplus activation function.
The Squareplus activation function is defined as:
`f(x) = (x + sqrt(x^2 + b)) / 2`
Where `b` is a smoothness parameter.
Args:
x: Input tensor.
b: Smoothness parameter. Defaults to 4.
Reference:
- [Ramachandran et al., 2021](https://arxiv.org/abs/2112.11687)
"""
return ops.squareplus(x, b=b)
@keras_export("keras.activations.gelu")
def gelu(x, approximate=False):
"""Gaussian error linear unit (GELU) activation function.
The Gaussian error linear unit (GELU) is defined as:
`gelu(x) = x * P(X <= x)` where `P(X) ~ N(0, 1)`,
i.e. `gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))`.
GELU weights inputs by their value, rather than gating
inputs by their sign as in ReLU.
Args:
x: Input tensor.
approximate: A `bool`, whether to enable approximation.
Reference:
- [Hendrycks et al., 2016](https://arxiv.org/abs/1606.08415)
"""
return ops.gelu(x, approximate=approximate)
@keras_export("keras.activations.celu")
def celu(x, alpha=1.0):
"""Continuously Differentiable Exponential Linear Unit.
The CeLU activation function is defined as:
`celu(x) = alpha * (exp(x / alpha) - 1) for x < 0`,`celu(x) = x for x >= 0`.
where `alpha` is a scaling parameter that controls the activation's shape.
Args:
x: Input tensor.
alpha: The α value for the CeLU formulation. Defaults to `1.0`.
Reference:
- [Barron, J. T., 2017](https://arxiv.org/abs/1704.07483)
"""
return ops.celu(x, alpha=alpha)
@keras_export("keras.activations.glu")
def glu(x, axis=-1):
"""Gated Linear Unit (GLU) activation function.
The GLU activation function is defined as:
`glu(x) = a * sigmoid(b)`,
where `x` is split into two equal parts `a` and `b` along the given axis.
Args:
x: Input tensor.
axis: The axis along which to split the input tensor. Defaults to `-1`.
Reference:
- [Dauphin et al., 2017](https://arxiv.org/abs/1612.08083)
"""
return ops.glu(x, axis=axis)
@keras_export("keras.activations.tanh")
def tanh(x):
"""Hyperbolic tangent activation function.
It is defined as:
`tanh(x) = sinh(x) / cosh(x)`, i.e.
`tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.
Args:
x: Input tensor.
"""
return ops.tanh(x)
@keras_export("keras.activations.tanh_shrink")
def tanh_shrink(x):
"""Tanh shrink activation function.
It is defined as:
`f(x) = x - tanh(x)`.
Args:
x: Input tensor.
"""
return ops.tanh_shrink(x)
@keras_export("keras.activations.hard_tanh")
def hard_tanh(x):
"""HardTanh activation function.
It is defined as:
`hard_tanh(x) = -1 for x < -1`,
`hard_tanh(x) = x for -1 <= x <= 1`,
`hard_tanh(x) = 1 for x > 1`.
Args:
x: Input tensor.
"""
return ops.hard_tanh(x)
@keras_export("keras.activations.hard_shrink")
def hard_shrink(x, threshold=0.5):
"""Hard Shrink activation function.
It is defined as:
`hard_shrink(x) = x` if `|x| > threshold`,
`hard_shrink(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
"""
return ops.hard_shrink(x, threshold=threshold)
@keras_export("keras.activations.threshold")
def threshold(x, threshold, default_value):
"""Threshold activation function.
It is defined as:
`threshold(x) = x` if `x > threshold`,
`threshold(x) = default_value` otherwise.
Args:
x: Input tensor.
threshold: The value that decides when to retain or replace x.
default_value: Value to assign when `x <= threshold`.
"""
return ops.threshold(x, threshold, default_value)
@keras_export("keras.activations.sigmoid")
def sigmoid(x):
"""Sigmoid activation function.
It is defined as: `sigmoid(x) = 1 / (1 + exp(-x))`.
For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
Args:
x: Input tensor.
"""
output = ops.sigmoid(x)
# Cache the logits to use for crossentropy loss.
try:
output._keras_logits = x
except AttributeError:
# We're dealing with a C-type.
pass
return output
@keras_export("keras.activations.exponential")
def exponential(x):
"""Exponential activation function.
Args:
x: Input tensor.
"""
return ops.exp(x)
@keras_export("keras.activations.hard_sigmoid")
def hard_sigmoid(x):
"""Hard sigmoid activation function.
The hard sigmoid activation is defined as:
- `0` if `if x <= -3`
- `1` if `x >= 3`
- `(x/6) + 0.5` if `-3 < x < 3`
It's a faster, piecewise linear approximation
of the sigmoid activation.
Args:
x: Input tensor.
Reference:
- [Wikipedia "Hard sigmoid"](https://en.wikipedia.org/wiki/Hard_sigmoid)
"""
return ops.hard_sigmoid(x)
@keras_export("keras.activations.log_sigmoid")
def log_sigmoid(x):
"""Logarithm of the sigmoid activation function.
It is defined as `f(x) = log(1 / (1 + exp(-x)))`.
Args:
x: Input tensor.
"""
return ops.log_sigmoid(x)
@keras_export("keras.activations.sparse_sigmoid")
def sparse_sigmoid(x):
"""Sparse sigmoid activation function.
It is defined as
`f(x) = 0` for `x <= -1`,
`f(x) = 0.5 * (x + 1)` for `-1 < x < 1`,
`f(x) = 1` for `x >= 1`.
Args:
x: Input tensor.
Reference:
- [M. Blondel, A. F. T. Martins, V. Niculae, 2019](https://arxiv.org/pdf/1901.02324)
"""
return ops.sparse_sigmoid(x)
@keras_export(["keras.activations.hard_silu", "keras.activations.hard_swish"])
def hard_silu(x):
"""Hard SiLU activation function, also known as Hard Swish.
It is defined as:
- `0` if `if x < -3`
- `x` if `x > 3`
- `x * (x + 3) / 6` if `-3 <= x <= 3`
It's a faster, piecewise linear approximation of the silu activation.
Args:
x: Input tensor.
Reference:
- [A Howard, 2019](https://arxiv.org/abs/1905.02244)
"""
x = backend.convert_to_tensor(x)
return ops.hard_silu(x)
@keras_export("keras.activations.linear")
def linear(x):
"""Linear activation function (pass-through).
A "linear" activation is an identity function:
it returns the input, unmodified.
Args:
x: Input tensor.
"""
return x
| ReLU |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 73194,
"end": 73628
} | class ____(
NamedTuple(
"_JobCanceledData",
[
("error", Optional[SerializableErrorInfo]),
],
)
):
def __new__(cls, error: Optional[SerializableErrorInfo]):
return super().__new__(
cls,
error=truncate_event_error_info(
check.opt_inst_param(error, "error", SerializableErrorInfo)
),
)
@whitelist_for_serdes
| JobCanceledData |
python | django__django | tests/template_tests/filter_tests/test_slice.py | {
"start": 170,
"end": 843
} | class ____(SimpleTestCase):
@setup({"slice01": '{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}'})
def test_slice01(self):
output = self.engine.render_to_string(
"slice01", {"a": "a&b", "b": mark_safe("a&b")}
)
self.assertEqual(output, "&b &b")
@setup(
{
"slice02": (
'{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}'
"{% endautoescape %}"
)
}
)
def test_slice02(self):
output = self.engine.render_to_string(
"slice02", {"a": "a&b", "b": mark_safe("a&b")}
)
self.assertEqual(output, "&b &b")
| SliceTests |
python | pytorch__pytorch | torch/nn/modules/linear.py | {
"start": 353,
"end": 1138
} | class ____(Module):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
Examples::
>>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 20])
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__()
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return input
| Identity |
python | py-pdf__pypdf | pypdf/filters.py | {
"start": 11588,
"end": 13488
} | class ____:
"""
The ASCIIHexDecode filter decodes data that has been encoded in ASCII
hexadecimal form into a base-7 ASCII format.
"""
@staticmethod
def decode(
data: Union[str, bytes],
decode_parms: Optional[DictionaryObject] = None,
**kwargs: Any,
) -> bytes:
"""
Decode an ASCII-Hex encoded data stream.
Args:
data: a str sequence of hexadecimal-encoded values to be
converted into a base-7 ASCII string
decode_parms: this filter does not use parameters.
Returns:
A string conversion in base-7 ASCII, where each of its values
v is such that 0 <= ord(v) <= 127.
Raises:
PdfStreamError:
"""
if isinstance(data, str):
data = data.encode()
retval = b""
hex_pair = b""
index = 0
while True:
if index >= len(data):
logger_warning(
"missing EOD in ASCIIHexDecode, check if output is OK", __name__
)
break # Reached end of string without an EOD
char = data[index : index + 1]
if char == b">":
break
if char.isspace():
index += 1
continue
hex_pair += char
if len(hex_pair) == 2:
retval += bytes((int(hex_pair, base=16),))
hex_pair = b""
index += 1
# If the filter encounters the EOD marker after reading
# an odd number of hexadecimal digits,
# it shall behave as if a 0 (zero) followed the last digit.
# For every even number of hexadecimal digits, hex_pair is reset to b"".
if hex_pair != b"":
hex_pair += b"0"
retval += bytes((int(hex_pair, base=16),))
return retval
| ASCIIHexDecode |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_output.py | {
"start": 896,
"end": 5504
} | class ____(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
# functions in the resulting MLIR output match the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
@tf.function(input_signature=[])
def f0000_single_return(self):
return tf.constant(1.0, shape=[1])
# Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])
def f0001_multiple_results_no_punctuation(self):
return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])
# Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])
def f0002_multiple_results_parentheses(self):
return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))
# Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])
def f0003_multiple_results_brackets(self):
return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])
def f0004_list_2_elements(self):
return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])
def f0005_dict_2_keys(self):
return {
'x': tf.constant(1.0, shape=[1]),
'y': tf.constant(1.0, shape=[2]),
}
# Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f0006_multiple_return_statements(self, x):
if x > 3.:
return {'x': tf.constant(1.0, shape=[1])}
else:
return {'x': tf.constant(1.0, shape=[1])}
if __name__ == '__main__':
common.do_test(TestModule)
| TestModule |
python | pypa__pipenv | pipenv/patched/pip/_vendor/platformdirs/api.py | {
"start": 250,
"end": 9277
} | class ____(ABC): # noqa: PLR0904
"""Abstract base class for platform directories."""
def __init__( # noqa: PLR0913, PLR0917
self,
appname: str | None = None,
appauthor: str | Literal[False] | None = None,
version: str | None = None,
roaming: bool = False, # noqa: FBT001, FBT002
multipath: bool = False, # noqa: FBT001, FBT002
opinion: bool = True, # noqa: FBT001, FBT002
ensure_exists: bool = False, # noqa: FBT001, FBT002
) -> None:
"""
Create a new platform directory.
:param appname: See `appname`.
:param appauthor: See `appauthor`.
:param version: See `version`.
:param roaming: See `roaming`.
:param multipath: See `multipath`.
:param opinion: See `opinion`.
:param ensure_exists: See `ensure_exists`.
"""
self.appname = appname #: The name of application.
self.appauthor = appauthor
"""
The name of the app author or distributing body for this application.
Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it.
"""
self.version = version
"""
An optional version path element to append to the path.
You might want to use this if you want multiple versions of your app to be able to run independently. If used,
this would typically be ``<major>.<minor>``.
"""
self.roaming = roaming
"""
Whether to use the roaming appdata directory on Windows.
That means that for users on a Windows network setup for roaming profiles, this user data will be synced on
login (see
`here <https://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
"""
self.multipath = multipath
"""
An optional parameter which indicates that the entire list of data dirs should be returned.
By default, the first item would only be returned.
"""
self.opinion = opinion #: A flag to indicating to use opinionated values.
self.ensure_exists = ensure_exists
"""
Optionally create the directory (and any missing parents) upon access if it does not exist.
By default, no directories are created.
"""
def _append_app_name_and_version(self, *base: str) -> str:
params = list(base[1:])
if self.appname:
params.append(self.appname)
if self.version:
params.append(self.version)
path = os.path.join(base[0], *params) # noqa: PTH118
self._optionally_create_directory(path)
return path
def _optionally_create_directory(self, path: str) -> None:
if self.ensure_exists:
Path(path).mkdir(parents=True, exist_ok=True)
def _first_item_as_path_if_multipath(self, directory: str) -> Path:
if self.multipath:
# If multipath is True, the first path is returned.
directory = directory.split(os.pathsep)[0]
return Path(directory)
@property
@abstractmethod
def user_data_dir(self) -> str:
""":return: data directory tied to the user"""
@property
@abstractmethod
def site_data_dir(self) -> str:
""":return: data directory shared by users"""
@property
@abstractmethod
def user_config_dir(self) -> str:
""":return: config directory tied to the user"""
@property
@abstractmethod
def site_config_dir(self) -> str:
""":return: config directory shared by the users"""
@property
@abstractmethod
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user"""
@property
@abstractmethod
def site_cache_dir(self) -> str:
""":return: cache directory shared by users"""
@property
@abstractmethod
def user_state_dir(self) -> str:
""":return: state directory tied to the user"""
@property
@abstractmethod
def user_log_dir(self) -> str:
""":return: log directory tied to the user"""
@property
@abstractmethod
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user"""
@property
@abstractmethod
def user_downloads_dir(self) -> str:
""":return: downloads directory tied to the user"""
@property
@abstractmethod
def user_pictures_dir(self) -> str:
""":return: pictures directory tied to the user"""
@property
@abstractmethod
def user_videos_dir(self) -> str:
""":return: videos directory tied to the user"""
@property
@abstractmethod
def user_music_dir(self) -> str:
""":return: music directory tied to the user"""
@property
@abstractmethod
def user_desktop_dir(self) -> str:
""":return: desktop directory tied to the user"""
@property
@abstractmethod
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user"""
@property
@abstractmethod
def site_runtime_dir(self) -> str:
""":return: runtime directory shared by users"""
@property
def user_data_path(self) -> Path:
""":return: data path tied to the user"""
return Path(self.user_data_dir)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users"""
return Path(self.site_data_dir)
@property
def user_config_path(self) -> Path:
""":return: config path tied to the user"""
return Path(self.user_config_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users"""
return Path(self.site_config_dir)
@property
def user_cache_path(self) -> Path:
""":return: cache path tied to the user"""
return Path(self.user_cache_dir)
@property
def site_cache_path(self) -> Path:
""":return: cache path shared by users"""
return Path(self.site_cache_dir)
@property
def user_state_path(self) -> Path:
""":return: state path tied to the user"""
return Path(self.user_state_dir)
@property
def user_log_path(self) -> Path:
""":return: log path tied to the user"""
return Path(self.user_log_dir)
@property
def user_documents_path(self) -> Path:
""":return: documents a path tied to the user"""
return Path(self.user_documents_dir)
@property
def user_downloads_path(self) -> Path:
""":return: downloads path tied to the user"""
return Path(self.user_downloads_dir)
@property
def user_pictures_path(self) -> Path:
""":return: pictures path tied to the user"""
return Path(self.user_pictures_dir)
@property
def user_videos_path(self) -> Path:
""":return: videos path tied to the user"""
return Path(self.user_videos_dir)
@property
def user_music_path(self) -> Path:
""":return: music path tied to the user"""
return Path(self.user_music_dir)
@property
def user_desktop_path(self) -> Path:
""":return: desktop path tied to the user"""
return Path(self.user_desktop_dir)
@property
def user_runtime_path(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir)
@property
def site_runtime_path(self) -> Path:
""":return: runtime path shared by users"""
return Path(self.site_runtime_dir)
def iter_config_dirs(self) -> Iterator[str]:
""":yield: all user and site configuration directories."""
yield self.user_config_dir
yield self.site_config_dir
def iter_data_dirs(self) -> Iterator[str]:
""":yield: all user and site data directories."""
yield self.user_data_dir
yield self.site_data_dir
def iter_cache_dirs(self) -> Iterator[str]:
""":yield: all user and site cache directories."""
yield self.user_cache_dir
yield self.site_cache_dir
def iter_runtime_dirs(self) -> Iterator[str]:
""":yield: all user and site runtime directories."""
yield self.user_runtime_dir
yield self.site_runtime_dir
def iter_config_paths(self) -> Iterator[Path]:
""":yield: all user and site configuration paths."""
for path in self.iter_config_dirs():
yield Path(path)
def iter_data_paths(self) -> Iterator[Path]:
""":yield: all user and site data paths."""
for path in self.iter_data_dirs():
yield Path(path)
def iter_cache_paths(self) -> Iterator[Path]:
""":yield: all user and site cache paths."""
for path in self.iter_cache_dirs():
yield Path(path)
def iter_runtime_paths(self) -> Iterator[Path]:
""":yield: all user and site runtime paths."""
for path in self.iter_runtime_dirs():
yield Path(path)
| PlatformDirsABC |
python | django__django | tests/template_tests/test_partials.py | {
"start": 7753,
"end": 21595
} | class ____(TestCase):
@setup(
{
"partial_source_success_template": (
"{% partialdef test-partial %}\n"
"TEST-PARTIAL-CONTENT\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_find_partial_source_success(self):
template = self.engine.get_template("partial_source_success_template")
partial_proxy = template.extra_data["partials"]["test-partial"]
expected = """{% partialdef test-partial %}
TEST-PARTIAL-CONTENT
{% endpartialdef %}"""
self.assertEqual(partial_proxy.source.strip(), expected.strip())
@setup(
{
"partial_source_with_inline_template": (
"{% partialdef inline-partial inline %}\n"
"INLINE-CONTENT\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_find_partial_source_with_inline(self):
template = self.engine.get_template("partial_source_with_inline_template")
partial_proxy = template.extra_data["partials"]["inline-partial"]
expected = """{% partialdef inline-partial inline %}
INLINE-CONTENT
{% endpartialdef %}"""
self.assertEqual(partial_proxy.source.strip(), expected.strip())
def test_find_partial_source_fallback_cases(self):
cases = {"None offsets": (None, None), "Out of bounds offsets": (10, 20)}
for name, (source_start, source_end) in cases.items():
with self.subTest(name):
partial = PartialTemplate(
NodeList(),
Origin("test"),
"test",
source_start=source_start,
source_end=source_end,
)
result = partial.find_partial_source("nonexistent-partial")
self.assertEqual(result, "")
@setup(
{
"empty_partial_template": ("{% partialdef empty %}{% endpartialdef %}"),
},
debug_only=True,
)
def test_find_partial_source_empty_partial(self):
template = self.engine.get_template("empty_partial_template")
partial_proxy = template.extra_data["partials"]["empty"]
result = partial_proxy.find_partial_source(template.source)
self.assertEqual(result, "{% partialdef empty %}{% endpartialdef %}")
@setup(
{
"consecutive_partials_template": (
"{% partialdef empty %}{% endpartialdef %}"
"{% partialdef other %}...{% endpartialdef %}"
),
},
debug_only=True,
)
def test_find_partial_source_multiple_consecutive_partials(self):
template = self.engine.get_template("consecutive_partials_template")
empty_proxy = template.extra_data["partials"]["empty"]
other_proxy = template.extra_data["partials"]["other"]
empty_result = empty_proxy.find_partial_source(template.source)
self.assertEqual(empty_result, "{% partialdef empty %}{% endpartialdef %}")
other_result = other_proxy.find_partial_source(template.source)
self.assertEqual(other_result, "{% partialdef other %}...{% endpartialdef %}")
def test_partials_with_duplicate_names(self):
test_cases = [
(
"nested",
"""
{% partialdef duplicate %}{% partialdef duplicate %}
CONTENT
{% endpartialdef %}{% endpartialdef %}
""",
),
(
"conditional",
"""
{% if ... %}
{% partialdef duplicate %}
CONTENT
{% endpartialdef %}
{% else %}
{% partialdef duplicate %}
OTHER-CONTENT
{% endpartialdef %}
{% endif %}
""",
),
]
for test_name, template_source in test_cases:
with self.subTest(test_name=test_name):
with self.assertRaisesMessage(
TemplateSyntaxError,
"Partial 'duplicate' is already defined in the "
"'template.html' template.",
):
Template(template_source, origin=Origin(name="template.html"))
@setup(
{
"named_end_tag_template": (
"{% partialdef thing %}CONTENT{% endpartialdef thing %}"
),
},
debug_only=True,
)
def test_find_partial_source_supports_named_end_tag(self):
template = self.engine.get_template("named_end_tag_template")
partial_proxy = template.extra_data["partials"]["thing"]
result = partial_proxy.find_partial_source(template.source)
self.assertEqual(
result, "{% partialdef thing %}CONTENT{% endpartialdef thing %}"
)
@setup(
{
"nested_partials_basic_template": (
"{% partialdef outer %}"
"{% partialdef inner %}...{% endpartialdef %}"
"{% endpartialdef %}"
),
},
debug_only=True,
)
def test_find_partial_source_supports_nested_partials(self):
template = self.engine.get_template("nested_partials_basic_template")
empty_proxy = template.extra_data["partials"]["outer"]
other_proxy = template.extra_data["partials"]["inner"]
outer_result = empty_proxy.find_partial_source(template.source)
self.assertEqual(
outer_result,
(
"{% partialdef outer %}{% partialdef inner %}"
"...{% endpartialdef %}{% endpartialdef %}"
),
)
inner_result = other_proxy.find_partial_source(template.source)
self.assertEqual(inner_result, "{% partialdef inner %}...{% endpartialdef %}")
@setup(
{
"nested_partials_named_end_template": (
"{% partialdef outer %}"
"{% partialdef inner %}...{% endpartialdef inner %}"
"{% endpartialdef outer %}"
),
},
debug_only=True,
)
def test_find_partial_source_supports_nested_partials_and_named_end_tags(self):
template = self.engine.get_template("nested_partials_named_end_template")
empty_proxy = template.extra_data["partials"]["outer"]
other_proxy = template.extra_data["partials"]["inner"]
outer_result = empty_proxy.find_partial_source(template.source)
self.assertEqual(
outer_result,
(
"{% partialdef outer %}{% partialdef inner %}"
"...{% endpartialdef inner %}{% endpartialdef outer %}"
),
)
inner_result = other_proxy.find_partial_source(template.source)
self.assertEqual(
inner_result, "{% partialdef inner %}...{% endpartialdef inner %}"
)
@setup(
{
"nested_partials_mixed_end_1_template": (
"{% partialdef outer %}"
"{% partialdef inner %}...{% endpartialdef %}"
"{% endpartialdef outer %}"
),
},
debug_only=True,
)
def test_find_partial_source_supports_nested_partials_and_mixed_end_tags_1(self):
template = self.engine.get_template("nested_partials_mixed_end_1_template")
empty_proxy = template.extra_data["partials"]["outer"]
other_proxy = template.extra_data["partials"]["inner"]
outer_result = empty_proxy.find_partial_source(template.source)
self.assertEqual(
outer_result,
(
"{% partialdef outer %}{% partialdef inner %}"
"...{% endpartialdef %}{% endpartialdef outer %}"
),
)
inner_result = other_proxy.find_partial_source(template.source)
self.assertEqual(inner_result, "{% partialdef inner %}...{% endpartialdef %}")
@setup(
{
"nested_partials_mixed_end_2_template": (
"{% partialdef outer %}"
"{% partialdef inner %}...{% endpartialdef inner %}"
"{% endpartialdef %}"
),
},
debug_only=True,
)
def test_find_partial_source_supports_nested_partials_and_mixed_end_tags_2(self):
template = self.engine.get_template("nested_partials_mixed_end_2_template")
empty_proxy = template.extra_data["partials"]["outer"]
other_proxy = template.extra_data["partials"]["inner"]
outer_result = empty_proxy.find_partial_source(template.source)
self.assertEqual(
outer_result,
(
"{% partialdef outer %}{% partialdef inner %}"
"...{% endpartialdef inner %}{% endpartialdef %}"
),
)
inner_result = other_proxy.find_partial_source(template.source)
self.assertEqual(
inner_result, "{% partialdef inner %}...{% endpartialdef inner %}"
)
@setup(
{
"partial_embedded_in_verbatim": (
"{% verbatim %}\n"
"{% partialdef testing-name %}\n"
"<p>Should be ignored</p>"
"{% endpartialdef testing-name %}\n"
"{% endverbatim %}\n"
"{% partialdef testing-name %}\n"
"<p>Content</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_template_embedded_in_verbatim(self):
template = self.engine.get_template("partial_embedded_in_verbatim")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(
partial_template.source,
"{% partialdef testing-name %}\n<p>Content</p>\n{% endpartialdef %}",
)
@setup(
{
"partial_debug_source": (
"{% partialdef testing-name %}\n"
"<p>Content</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_source_uses_offsets_in_debug(self):
template = self.engine.get_template("partial_debug_source")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(partial_template._source_start, 0)
self.assertEqual(partial_template._source_end, 64)
expected = template.source[
partial_template._source_start : partial_template._source_end
]
self.assertEqual(partial_template.source, expected)
@setup(
{
"partial_embedded_in_named_verbatim": (
"{% verbatim block1 %}\n"
"{% partialdef testing-name %}\n"
"{% endverbatim block1 %}\n"
"{% partialdef testing-name %}\n"
"<p>Named Content</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_template_embedded_in_named_verbatim(self):
template = self.engine.get_template("partial_embedded_in_named_verbatim")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(
"{% partialdef testing-name %}\n<p>Named Content</p>\n{% endpartialdef %}",
partial_template.source,
)
@setup(
{
"partial_embedded_in_comment_block": (
"{% comment %}\n"
"{% partialdef testing-name %}\n"
"{% endcomment %}\n"
"{% partialdef testing-name %}\n"
"<p>Comment Content</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_template_embedded_in_comment_block(self):
template = self.engine.get_template("partial_embedded_in_comment_block")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(
partial_template.source,
"{% partialdef testing-name %}\n"
"<p>Comment Content</p>\n"
"{% endpartialdef %}",
)
@setup(
{
"partial_embedded_in_inline_comment": (
"{# {% partialdef testing-name %} #}\n"
"{% partialdef testing-name %}\n"
"<p>Inline Comment Content</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_template_embedded_in_inline_comment(self):
template = self.engine.get_template("partial_embedded_in_inline_comment")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(
partial_template.source,
"{% partialdef testing-name %}\n"
"<p>Inline Comment Content</p>\n"
"{% endpartialdef %}",
)
@setup(
{
"partial_contains_fake_end_inside_verbatim": (
"{% partialdef testing-name %}\n"
"{% verbatim %}{% endpartialdef %}{% endverbatim %}\n"
"<p>Body</p>\n"
"{% endpartialdef %}\n"
),
},
debug_only=True,
)
def test_partial_template_contains_fake_end_inside_verbatim(self):
template = self.engine.get_template("partial_contains_fake_end_inside_verbatim")
partial_template = template.extra_data["partials"]["testing-name"]
self.assertEqual(
partial_template.source,
"{% partialdef testing-name %}\n"
"{% verbatim %}{% endpartialdef %}{% endverbatim %}\n"
"<p>Body</p>\n"
"{% endpartialdef %}",
)
| FindPartialSourceTests |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 18169,
"end": 22032
} | class ____(URLPatternsTestCase, TestCase):
client_class = APIClient
urlpatterns = [
path('path/', include(url_path_router.urls)),
path('default/', include(notes_path_default_router.urls)),
path('example/', include(notes_path_router.urls)),
]
def setUp(self):
RouterTestModel.objects.create(uuid='123', text='foo bar')
RouterTestModel.objects.create(uuid='a b', text='baz qux')
def test_create(self):
new_note = {
'uuid': 'foo',
'text': 'example'
}
response = self.client.post('/example/notes/', data=new_note)
assert response.status_code == 201
assert response['location'] == 'http://testserver/example/notes/foo/'
assert response.data == {"url": "http://testserver/example/notes/foo/", "uuid": "foo", "text": "example"}
assert RouterTestModel.objects.filter(uuid='foo').exists()
def test_retrieve(self):
for url in ('/example/notes/123/', '/default/notes/123/'):
with self.subTest(url=url):
response = self.client.get(url)
assert response.status_code == 200
# only gets example path since was the last to be registered
assert response.data == {"url": "http://testserver/example/notes/123/", "uuid": "123", "text": "foo bar"}
def test_list(self):
for url in ('/example/notes/', '/default/notes/'):
with self.subTest(url=url):
response = self.client.get(url)
assert response.status_code == 200
# only gets example path since was the last to be registered
assert response.data == [
{"url": "http://testserver/example/notes/123/", "uuid": "123", "text": "foo bar"},
{"url": "http://testserver/example/notes/a%20b/", "uuid": "a b", "text": "baz qux"},
]
def test_update(self):
updated_note = {
'text': 'foo bar example'
}
response = self.client.patch('/example/notes/123/', data=updated_note)
assert response.status_code == 200
assert response.data == {"url": "http://testserver/example/notes/123/", "uuid": "123", "text": "foo bar example"}
def test_delete(self):
response = self.client.delete('/example/notes/123/')
assert response.status_code == 204
assert not RouterTestModel.objects.filter(uuid='123').exists()
def test_list_extra_action(self):
kwarg = 1234
response = self.client.get(f'/path/list/{kwarg}/')
assert response.status_code == 200
assert json.loads(response.content.decode()) == {'kwarg': kwarg}
def test_detail_extra_action(self):
pk = '1'
kwarg = 1234
response = self.client.get(f'/path/{pk}/detail/{kwarg}/')
assert response.status_code == 200
assert json.loads(response.content.decode()) == {'pk': pk, 'kwarg': kwarg}
def test_detail_extra_other_action(self):
# this to assure that ambiguous patterns are interpreted correctly
# using the `path` converters this URL is recognized to match the pattern
# of `UrlPathViewSet.url_path_detail` when it should match
# `UrlPathViewSet.url_path_detail_multiple_params`
pk = '1'
kwarg = 1234
param = 2
response = self.client.get('/path/1/detail/1234/detail/2/')
assert response.status_code == 200
assert json.loads(response.content.decode()) == {'pk': pk, 'kwarg': kwarg, 'param': param}
def test_defaultrouter_root(self):
response = self.client.get('/default/')
assert response.status_code == 200
# only gets example path since was the last to be registered
assert response.data == {"notes": "http://testserver/example/notes/"}
| TestUrlPath |
python | plotly__plotly.py | plotly/graph_objs/layout/xaxis/_minor.py | {
"start": 235,
"end": 20338
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.minor"
_valid_props = {
"dtick",
"gridcolor",
"griddash",
"gridwidth",
"nticks",
"showgrid",
"tick0",
"tickcolor",
"ticklen",
"tickmode",
"ticks",
"tickvals",
"tickvalssrc",
"tickwidth",
}
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def _prop_descriptions(self):
return """\
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickcolor
Sets the tick color.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
"""
def __init__(
self,
arg=None,
dtick=None,
gridcolor=None,
griddash=None,
gridwidth=None,
nticks=None,
showgrid=None,
tick0=None,
tickcolor=None,
ticklen=None,
tickmode=None,
ticks=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
**kwargs,
):
"""
Construct a new Minor object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.Minor`
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickcolor
Sets the tick color.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
Returns
-------
Minor
"""
super().__init__("minor")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.xaxis.Minor
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Minor`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtick", arg, dtick)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("nticks", arg, nticks)
self._set_property("showgrid", arg, showgrid)
self._set_property("tick0", arg, tick0)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("ticks", arg, ticks)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Minor |
python | django__django | tests/admin_views/tests.py | {
"start": 77144,
"end": 79412
} | class ____(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_change_form_URL_has_correct_value(self):
"""
change_view has form_url in response.context
"""
response = self.client.get(
reverse(
"admin:admin_views_section_change",
args=(self.s1.pk,),
current_app=self.current_app,
)
)
self.assertIn(
"form_url", response.context, msg="form_url not present in response.context"
)
self.assertEqual(response.context["form_url"], "pony")
def test_initial_data_can_be_overridden(self):
"""
The behavior for setting initial form data can be overridden in the
ModelAdmin class. Usually, the initial value is set via the GET params.
"""
response = self.client.get(
reverse("admin:admin_views_restaurant_add", current_app=self.current_app),
{"name": "test_value"},
)
# this would be the usual behavior
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behavior
self.assertContains(response, 'value="overridden_value"')
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminViewFormUrlTest |
python | apache__airflow | providers/fab/tests/unit/fab/plugins/test_plugin.py | {
"start": 4994,
"end": 5133
} | class ____(AirflowPlugin):
name = "preload"
def on_load(self, *args, **kwargs):
self.name = "postload"
| AirflowTestOnLoadPlugin |
python | django__django | django/views/generic/dates.py | {
"start": 20594,
"end": 22209
} | class ____(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Base detail view for a single object on a single date; this differs from
the standard DetailView by accepting a year/month/day in the URL.
This requires subclassing to provide a response mixin.
"""
def get_object(self, queryset=None):
"""Get the object this request displays."""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(
year,
self.get_year_format(),
month,
self.get_month_format(),
day,
self.get_day_format(),
)
# Use a custom queryset if provided
qs = self.get_queryset() if queryset is None else queryset
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(
_(
"Future %(verbose_name_plural)s not available because "
"%(class_name)s.allow_future is False."
)
% {
"verbose_name_plural": qs.model._meta.verbose_name_plural,
"class_name": self.__class__.__name__,
}
)
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super().get_object(queryset=qs)
| BaseDateDetailView |
python | run-llama__llama_index | llama-index-core/llama_index/core/llms/llm.py | {
"start": 2357,
"end": 2493
} | class ____(Protocol):
def __call__(self, messages: Sequence[ChatMessage]) -> str:
pass
@runtime_checkable
| MessagesToPromptType |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 26527,
"end": 30709
} | class ____(_WeightedLoss):
r"""Creates a criterion that measures the Binary Cross Entropy between the target and
the input probabilities:
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
mathematically undefined in the above loss equation. PyTorch chooses to set
:math:`\log (0) = -\infty`, since :math:`\lim_{x\to 0} \log (x) = -\infty`.
However, an infinite term in the loss equation is not desirable for several reasons.
For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
we would also have an infinite term in our gradient, since
:math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`.
This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
and using it for things like linear regression would not be straight-forward.
Our solution is that BCELoss clamps its log function outputs to be greater than
or equal to -100. This way, we can always have a finite loss value and a linear
backward method.
Args:
weight (Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size `nbatch`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
shape as input.
Examples:
>>> m = nn.Sigmoid()
>>> loss = nn.BCELoss()
>>> input = torch.randn(3, 2, requires_grad=True)
>>> target = torch.rand(3, 2, requires_grad=False)
>>> output = loss(m(input), target)
>>> output.backward()
"""
__constants__ = ["reduction"]
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.binary_cross_entropy(
input, target, weight=self.weight, reduction=self.reduction
)
| BCELoss |
python | marshmallow-code__marshmallow | src/marshmallow/exceptions.py | {
"start": 1961,
"end": 2101
} | class ____(MarshmallowError, TypeError):
"""Raised when a string is passed when a list of strings is expected."""
| StringNotCollectionError |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 8042,
"end": 8226
} | class ____(CalculatorTestQc):
"Represents a query compiler with no costing information, but different."
def get_backend(self):
return "Test_Casting_Default_2"
| DefaultQC2 |
python | keras-team__keras | keras/src/backend/jax/trainer.py | {
"start": 863,
"end": 35736
} | class ____(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.train_function = None
self.test_function = None
self.predict_function = None
self._jax_state_synced = True
def compute_loss_and_updates(
self,
trainable_variables,
non_trainable_variables,
metrics_variables,
x,
y,
sample_weight,
training=False,
optimizer_variables=None,
):
"""This method is stateless and is intended for use with jax.grad."""
kwargs = {}
if self._call_has_training_arg:
kwargs["training"] = training
# Run stateless forward pass
y_pred, non_trainable_variables, losses = self.stateless_call(
trainable_variables,
non_trainable_variables,
x,
return_losses=True,
**kwargs,
)
if losses:
# Make forward pass losses available to compute_loss.
self._losses_override.clear()
self._losses_override = losses
loss, variables = self.stateless_compute_loss(
trainable_variables,
non_trainable_variables,
metrics_variables,
x=x,
y=y,
y_pred=y_pred,
sample_weight=sample_weight,
training=training,
)
if losses:
self._losses_override.clear()
(trainable_variables, non_trainable_variables, metrics_variables) = (
variables
)
# Handle loss scaling
unscaled_loss = loss
if training and self.optimizer is not None:
# Scale loss with a StatelessScope, to use an update scale variable.
mapping = list(zip(self.optimizer.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping):
loss = self.optimizer.scale_loss(loss)
return loss, (
unscaled_loss,
y_pred,
non_trainable_variables,
metrics_variables,
)
def _update_metrics_variables(
self, metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
):
with backend.StatelessScope(
state_mapping=[
(ref_v, v)
for ref_v, v in zip(self.metrics_variables, metrics_variables)
]
) as scope:
self._loss_tracker.update_state(
unscaled_loss,
sample_weight=next(
i for i in tree.flatten(x) if i is not None
).shape[0],
)
logs = self.compute_metrics(x, y, y_pred, sample_weight)
new_metrics_variables = []
for ref_v in self.metrics_variables:
new_v = scope.get_current_value(ref_v)
if new_v is None:
new_v = ref_v.value
new_metrics_variables.append(new_v)
return logs, new_metrics_variables
def train_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
grad_fn = jax.value_and_grad(
self.compute_loss_and_updates, has_aux=True
)
(loss, aux), grads = grad_fn(
trainable_variables,
non_trainable_variables,
metrics_variables,
x,
y,
sample_weight,
training=True,
optimizer_variables=optimizer_variables,
)
(unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = (
aux
)
(
trainable_variables,
optimizer_variables,
) = self.optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
logs, metrics_variables = self._update_metrics_variables(
metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
)
state = (
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
)
return logs, state
def test_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
metrics_variables,
) = state
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
loss, aux = self.compute_loss_and_updates(
trainable_variables,
non_trainable_variables,
metrics_variables,
x,
y,
sample_weight,
training=False,
)
(unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = (
aux
)
logs, metrics_variables = self._update_metrics_variables(
metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
)
state = (
trainable_variables,
non_trainable_variables,
metrics_variables,
)
return logs, state
def predict_step(self, state, data):
trainable_variables, non_trainable_variables = state
kwargs = {}
if self._call_has_training_arg:
kwargs["training"] = False
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
outputs, non_trainable_variables = self.stateless_call(
trainable_variables, non_trainable_variables, x, **kwargs
)
return outputs, non_trainable_variables
def _make_function(self, step_function, concatenate_outputs=False):
if self.steps_per_execution > 1:
if concatenate_outputs:
def concatenate(outputs):
output = outputs[0]
for next_output in outputs[1:]:
output = tree.map_structure(
lambda t1, t2: jax.numpy.concatenate([t1, t2]),
output,
next_output,
)
return output
if not self.run_eagerly and self.jit_compile:
concatenate = jit(concatenate)
def iterator_step(state, iterator):
data = next(iterator)
outputs, state = step_function(state, data)
outputs = [outputs]
try:
for _ in range(self.steps_per_execution - 1):
data = next(iterator)
_outputs, state = step_function(state, data)
outputs.append(_outputs)
except StopIteration:
pass
outputs = concatenate(outputs)
return outputs, state
else:
def iterator_step(state, iterator):
data = next(iterator)
outputs, state = step_function(state, data)
try:
for _ in range(self.steps_per_execution - 1):
data = next(iterator)
outputs, state = step_function(state, data)
except StopIteration:
pass
return outputs, state
else:
def iterator_step(state, iterator):
return step_function(state, next(iterator))
return iterator_step
def make_train_function(self, force=False):
if self.train_function is not None and not force:
return
if not self.run_eagerly and self.jit_compile:
out_shardings = None
if distribution_lib.distribution() is not None:
state_shardings = self._get_state_sharding_spec()
out_shardings = (None, state_shardings)
if is_nnx_enabled():
step_fn = lambda state, data: type(self).train_step(
self, state, data
)
else:
step_fn = self.train_step
train_step = jit(
step_fn,
donate_argnums=0,
out_shardings=out_shardings,
)
else:
train_step = self.train_step
step_function = self._make_function(train_step)
self.train_function = step_function
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return
if not self.run_eagerly and self.jit_compile:
out_shardings = None
if distribution_lib.distribution() is not None:
(
trainable_shardings,
non_trainable_shardings,
_, # optimizer_shardings
metrics_shardings,
) = self._get_state_sharding_spec()
state_shardings = (
trainable_shardings,
non_trainable_shardings,
metrics_shardings,
)
out_shardings = (None, state_shardings)
if is_nnx_enabled():
step_fn = lambda state, data: type(self).test_step(
self, state, data
)
else:
step_fn = self.test_step
test_step = jit(
step_fn,
donate_argnums=0,
out_shardings=out_shardings,
)
else:
test_step = self.test_step
step_function = self._make_function(test_step)
self.test_function = step_function
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
def predict_step(state, data):
outputs, non_trainable_variables = self.predict_step(state, data)
return outputs, (state[0], non_trainable_variables)
if not self.run_eagerly and self.jit_compile:
out_shardings = None
if distribution_lib.distribution() is not None:
(
trainable_shardings,
non_trainable_shardings,
_, # optimizer_shardings
_, # metrics_shardings
) = self._get_state_sharding_spec()
state_shardings = (
trainable_shardings,
non_trainable_shardings,
)
out_shardings = (None, state_shardings)
predict_step = jit(
predict_step,
donate_argnums=0,
out_shardings=out_shardings,
)
_step_function = self._make_function(
predict_step, concatenate_outputs=True
)
def step_function(state, iterator):
outputs, state = _step_function(state, iterator)
return outputs, state
self.predict_function = step_function
@traceback_utils.filter_traceback
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
self._assert_compile_called("fit")
# Possibly cap epochs for debugging runs.
max_epochs = config.max_epochs()
if max_epochs and max_epochs < epochs:
warnings.warn("Limiting epochs to %d" % max_epochs)
epochs = max_epochs
# TODO: respect compiled trainable state
self._eval_epoch_iterator = None
if validation_split and validation_data is None:
# Create the validation data using the training data. Only supported
# for TF/numpy/jax arrays.
(
(x, y, sample_weight),
validation_data,
) = array_slicing.train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
if validation_data is not None:
(
val_x,
val_y,
val_sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
# Create an iterator that yields batches for one epoch.
epoch_iterator = JAXEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
epoch_iterator.reset()
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=epochs,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_train_function()
self.stop_training = False
training_logs = {}
training_finished = False
callbacks.on_train_begin()
initial_epoch = self._initial_epoch or initial_epoch
try:
for epoch in range(initial_epoch, epochs):
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
self._jax_state_synced = True
with epoch_iterator.catch_stop_iteration():
for begin_step, end_step, iterator in epoch_iterator:
# Callbacks
callbacks.on_train_batch_begin(begin_step)
# Train step
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
optimizer_variables=True,
metrics_variables=True,
purge_model_variables=True,
)
self._jax_state_synced = False
logs, state = self.train_function(state, iterator)
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
# Setting _jax_state enables callbacks to force a state
# sync if they need to.
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"optimizer_variables": optimizer_variables,
"metrics_variables": metrics_variables,
}
# Dispatch callbacks. This takes care of async dispatch.
callbacks.on_train_batch_end(end_step, logs)
if self.stop_training:
# Stop training if a callback has set
# this flag in on_(train_)batch_end.
break
# Reattach state to the model
# (if not already done by a callback).
# NOTE: doing this after each step would be a big performance
# bottleneck.
self.jax_state_sync()
# Override with model metrics instead of last step logs if
# needed.
epoch_logs = dict(self._get_metrics_result_or_logs(logs))
# Run validation.
if validation_data is not None and self._should_eval(
epoch, validation_freq
):
# Create JAXEpochIterator for evaluation and cache it.
if getattr(self, "_eval_epoch_iterator", None) is None:
self._eval_epoch_iterator = JAXEpochIterator(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps_per_execution=self.steps_per_execution,
steps_per_epoch=validation_steps,
shuffle=False,
)
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
return_dict=True,
_use_cached_eval_dataset=True,
)
val_logs = {
f"val_{name}": val for name, val in val_logs.items()
}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
training_logs = epoch_logs
if self.stop_training:
break
training_finished = True
finally:
self.jax_state_sync()
if (
isinstance(self.optimizer, optimizers_module.Optimizer)
and epochs > 0
):
self.optimizer.finalize_variable_values(self.trainable_weights)
# If _eval_epoch_iterator exists, delete it after all epochs
# are done.
if getattr(self, "_eval_epoch_iterator", None) is not None:
del self._eval_epoch_iterator
if training_finished:
callbacks.on_train_end(logs=training_logs)
self._jax_state = None
return self.history
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
self._assert_compile_called("evaluate")
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of
# input/target data.
epoch_iterator = JAXEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
epoch_iterator.reset()
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = {}
self.reset_metrics()
self._jax_state_synced = True
with epoch_iterator.catch_stop_iteration():
for begin_step, end_step, iterator in epoch_iterator:
callbacks.on_test_batch_begin(begin_step)
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=True,
purge_model_variables=True,
)
self._jax_state_synced = False
logs, state = self.test_function(state, iterator)
(
trainable_variables,
non_trainable_variables,
metrics_variables,
) = state
# Setting _jax_state enables callbacks to force a state sync
# if they need to.
self._jax_state = {
# I wouldn't recommend modifying non-trainable model state
# during evaluate(), but it's allowed.
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"metrics_variables": metrics_variables,
}
# Dispatch callbacks. This takes care of async dispatch.
callbacks.on_test_batch_end(end_step, logs)
if self.stop_evaluating:
break
# Reattach state back to model (if not already done by a callback).
self.jax_state_sync()
logs = self._get_metrics_result_or_logs(logs)
callbacks.on_test_end(logs)
self._jax_state = None
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = JAXEpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
if not all(layer.built for layer in self._flatten_layers()):
# Build the model on one batch of data.
for _, _, iterator in epoch_iterator:
# Build model
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(
next(iterator)
)
if is_nnx_enabled():
self(x)
else:
with backend.StatelessScope():
self(x)
break
epoch_iterator.reset()
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
self._jax_state_synced = True
outputs = None
non_trainable_variables = None
with epoch_iterator.catch_stop_iteration():
for begin_step, end_step, iterator in epoch_iterator:
callbacks.on_predict_batch_begin(begin_step)
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
purge_model_variables=True,
)
self._jax_state_synced = False
batch_outputs, state = self.predict_function(state, iterator)
(
trainable_variables,
non_trainable_variables,
) = state
self._jax_state = {
"trainable_variables": trainable_variables,
# I wouldn't recommend modifying non-trainable model state
# during predict(), but it's allowed.
"non_trainable_variables": non_trainable_variables,
}
outputs = append_to_outputs(batch_outputs, outputs)
# Dispatch callbacks. This takes care of async dispatch.
callbacks.on_predict_batch_end(
end_step, {"outputs": batch_outputs}
)
if self.stop_predicting:
break
self.jax_state_sync()
callbacks.on_predict_end()
self._jax_state = None
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
self._assert_compile_called("train_on_batch")
if class_weight is not None:
if sample_weight is not None:
raise ValueError(
"Arguments `sample_weight` and `class_weight` "
"cannot be specified at the same time. "
f"Received: sample_weight={sample_weight}, "
f"class_weight={class_weight}"
)
sample_weight = data_adapter_utils.class_weight_to_sample_weights(
y, class_weight
)
def data():
yield _distribute_data((x, y, sample_weight))
# Maybe build model
self._symbolic_build(data_batch=next(data()))
self.make_train_function()
# Train step
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
optimizer_variables=True,
metrics_variables=True,
purge_model_variables=False,
)
self._jax_state_synced = False
logs, state = self.train_function(state, data())
# State sync
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"optimizer_variables": optimizer_variables,
"metrics_variables": metrics_variables,
}
self.jax_state_sync()
# Format return values
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
def data():
yield _distribute_data((x, y, sample_weight))
# Maybe build model
self._symbolic_build(data_batch=next(data()))
self.make_test_function()
# Test step
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=True,
purge_model_variables=False,
)
self._jax_state_synced = False
logs, state = self.test_function(state, data())
# State sync
trainable_variables, non_trainable_variables, metrics_variables = state
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"metrics_variables": metrics_variables,
}
self.jax_state_sync()
# Format return values.
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
if not all(layer.built for layer in self._flatten_layers()):
# Build model
with backend.StatelessScope():
self(x)
self.make_predict_function()
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=False,
purge_model_variables=False,
)
self._jax_state_synced = False
def data():
yield (x,)
batch_outputs, state = self.predict_function(state, data())
trainable_variables, non_trainable_variables = state
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
}
self.jax_state_sync()
batch_outputs = tree.map_structure(lambda x: np.array(x), batch_outputs)
return batch_outputs
def jax_state_sync(self):
if not getattr(self, "_jax_state", None) or self._jax_state_synced:
return
trainable_variables = self._jax_state.get("trainable_variables", None)
non_trainable_variables = self._jax_state.get(
"non_trainable_variables", None
)
optimizer_variables = self._jax_state.get("optimizer_variables", None)
metrics_variables = self._jax_state.get("metrics_variables", None)
if trainable_variables:
for ref_v, v in zip(self.trainable_variables, trainable_variables):
ref_v.assign(v)
if non_trainable_variables:
for ref_v, v in zip(
self.non_trainable_variables, non_trainable_variables
):
ref_v.assign(v)
if optimizer_variables:
for ref_v, v in zip(self.optimizer.variables, optimizer_variables):
ref_v.assign(v)
if metrics_variables:
for ref_v, v in zip(self.metrics_variables, metrics_variables):
ref_v.assign(v)
self._jax_state_synced = True
def _get_state_sharding_spec(self):
trainable_shardings = [
v.value.sharding for v in self.trainable_variables
]
non_trainable_shardings = [
v.value.sharding for v in self.non_trainable_variables
]
if hasattr(self, "optimizer") and self.optimizer is not None:
optimizer_shardings = [
v.value.sharding for v in self.optimizer.variables
]
else:
optimizer_shardings = []
metrics_shardings = [v.value.sharding for v in self.metrics_variables]
return (
trainable_shardings,
non_trainable_shardings,
optimizer_shardings,
metrics_shardings,
)
def _purge_model_variables(
self,
trainable_variables=False,
non_trainable_variables=False,
optimizer_variables=False,
metrics_variables=False,
):
"""Remove all the model variable for memory saving.
During JAX training, since the training function is stateless, we have
to pass in and get the model weights over and over, during which the
copy of the weights that attached to the Variable are still and
occupying extra memory. We remove those variable to save memory (for
better memory utilization) at the beginning of the epoch, and reattach
the value back to variables at the end of the epoch, via
`jax_state_sync()`.
"""
if trainable_variables:
for v in self.trainable_variables:
v._value = None
if non_trainable_variables:
for v in self.non_trainable_variables:
v._value = None
if optimizer_variables:
for v in self.optimizer.variables:
v._value = None
if metrics_variables:
for v in self.metrics_variables:
v._value = None
def _get_jax_state(
self,
trainable_variables=False,
non_trainable_variables=False,
optimizer_variables=False,
metrics_variables=False,
purge_model_variables=False,
):
state = []
if trainable_variables:
state.append([v.value for v in self.trainable_variables])
if non_trainable_variables:
state.append([v.value for v in self.non_trainable_variables])
if optimizer_variables:
state.append([v.value for v in self.optimizer.variables])
if metrics_variables:
state.append([v.value for v in self.metrics_variables])
if purge_model_variables:
self._purge_model_variables(
trainable_variables=trainable_variables,
non_trainable_variables=non_trainable_variables,
optimizer_variables=optimizer_variables,
metrics_variables=metrics_variables,
)
return tuple(state)
def _distribute_data(data, layouts=None):
distribution = distribution_lib.distribution()
if distribution is not None:
if layouts is None:
layouts = tree.map_structure(
lambda d: distribution.get_data_layout(d.shape),
data,
)
jax_dist_data_input = partial(
jax_distribution_lib.distribute_data_input,
batch_dim_name=distribution.batch_dim_name,
)
return tree.map_structure(jax_dist_data_input, data, layouts)
return tree.map_structure(jax.device_put, data)
| JAXTrainer |
python | mlflow__mlflow | mlflow/utils/autologging_utils/logging_and_warnings.py | {
"start": 10259,
"end": 14249
} | class ____:
"""
Threadsafe context manager that modifies the behavior of MLflow event logging statements
and MLflow warnings upon entry, according to the specified parameters. Modifications are
applied globally across all threads and are not reverted until all threads that have made
a particular modification have exited the context.
Args:
disable_event_logs: If `True`, disable (mute & discard) MLflow event logging statements.
If `False`, do not disable MLflow event logging statements.
disable_warnings: If `True`, disable (mutate & discard) MLflow warnings. If `False`,
do not disable MLflow warnings.
reroute_warnings: If `True`, reroute MLflow warnings to an MLflow event logger with
level WARNING. If `False`, do not reroute MLflow warnings.
"""
_lock = RLock()
_disable_event_logs_count = 0
_disable_warnings_count = 0
_reroute_warnings_count = 0
def __init__(self, disable_event_logs, disable_warnings, reroute_warnings):
self._disable_event_logs = disable_event_logs
self._disable_warnings = disable_warnings
self._reroute_warnings = reroute_warnings
def __enter__(self):
self._enter_impl()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._exit_impl(exc_type, exc_val, exc_tb)
async def __aenter__(self):
self._enter_impl()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self._exit_impl(exc_type, exc_val, exc_tb)
def _enter_impl(self):
try:
with MlflowEventsAndWarningsBehaviorGlobally._lock:
if self._disable_event_logs:
if MlflowEventsAndWarningsBehaviorGlobally._disable_event_logs_count <= 0:
logging_utils.disable_logging()
MlflowEventsAndWarningsBehaviorGlobally._disable_event_logs_count += 1
if self._disable_warnings:
if MlflowEventsAndWarningsBehaviorGlobally._disable_warnings_count <= 0:
_WARNINGS_CONTROLLER.set_mlflow_warnings_disablement_state_globally(
disabled=True
)
MlflowEventsAndWarningsBehaviorGlobally._disable_warnings_count += 1
if self._reroute_warnings:
if MlflowEventsAndWarningsBehaviorGlobally._reroute_warnings_count <= 0:
_WARNINGS_CONTROLLER.set_mlflow_warnings_rerouting_state_globally(
rerouted=True
)
MlflowEventsAndWarningsBehaviorGlobally._reroute_warnings_count += 1
except Exception:
pass
def _exit_impl(self, *args, **kwargs):
try:
with MlflowEventsAndWarningsBehaviorGlobally._lock:
if self._disable_event_logs:
MlflowEventsAndWarningsBehaviorGlobally._disable_event_logs_count -= 1
if self._disable_warnings:
MlflowEventsAndWarningsBehaviorGlobally._disable_warnings_count -= 1
if self._reroute_warnings:
MlflowEventsAndWarningsBehaviorGlobally._reroute_warnings_count -= 1
if MlflowEventsAndWarningsBehaviorGlobally._disable_event_logs_count <= 0:
logging_utils.enable_logging()
if MlflowEventsAndWarningsBehaviorGlobally._disable_warnings_count <= 0:
_WARNINGS_CONTROLLER.set_mlflow_warnings_disablement_state_globally(
disabled=False
)
if MlflowEventsAndWarningsBehaviorGlobally._reroute_warnings_count <= 0:
_WARNINGS_CONTROLLER.set_mlflow_warnings_rerouting_state_globally(
rerouted=False
)
except Exception:
pass
| MlflowEventsAndWarningsBehaviorGlobally |
python | ray-project__ray | rllib/policy/torch_mixins.py | {
"start": 1906,
"end": 3416
} | class ____:
"""Mixin for TorchPolicy that adds entropy coeff decay."""
def __init__(self, entropy_coeff, entropy_coeff_schedule):
self._entropy_coeff_schedule = None
# Disable any scheduling behavior related to learning if Learner API is active.
# Schedules are handled by Learner class.
if entropy_coeff_schedule is None:
self.entropy_coeff = entropy_coeff
else:
# Allows for custom schedule similar to lr_schedule format
if isinstance(entropy_coeff_schedule, list):
self._entropy_coeff_schedule = PiecewiseSchedule(
entropy_coeff_schedule,
outside_value=entropy_coeff_schedule[-1][-1],
framework=None,
)
else:
# Implements previous version but enforces outside_value
self._entropy_coeff_schedule = PiecewiseSchedule(
[[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
outside_value=0.0,
framework=None,
)
self.entropy_coeff = self._entropy_coeff_schedule.value(0)
def on_global_var_update(self, global_vars):
super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
if self._entropy_coeff_schedule is not None:
self.entropy_coeff = self._entropy_coeff_schedule.value(
global_vars["timestep"]
)
@OldAPIStack
| EntropyCoeffSchedule |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/trace_type_builder.py | {
"start": 2198,
"end": 4122
} | class ____(trace.PlaceholderContext):
"""Container with mappings shared across TraceTypes for placeholder values."""
def __init__(self,
context_graph=None,
placeholder_mapping=None,
unnest_only=False,
with_none_control_dependencies=False,
composite_device_name=None):
self._alias_id_to_placeholder = placeholder_mapping or {}
self._naming_scope = None
self._context_graph = context_graph
self._unnest_only = unnest_only
self._with_none_control_dependencies = with_none_control_dependencies
self._composite_device_name = composite_device_name
def has_placeholder(self, alias_id: Hashable) -> bool:
return alias_id in self._alias_id_to_placeholder
def get_placeholder(self, alias_id: Hashable) -> Hashable:
if not self.has_placeholder(alias_id):
raise KeyError(f"alias_id: {alias_id} not found in this instance of "
"placeholder context.")
return self._alias_id_to_placeholder[alias_id]
def add_placeholder(self, alias_id: Hashable, placeholder: Hashable) -> None:
if alias_id in self._alias_id_to_placeholder:
raise KeyError(f"alias id: {alias_id} is already stored in this "
"instance of placeholder context.")
self._alias_id_to_placeholder[alias_id] = placeholder
def update_naming_scope(self, naming_scope: Optional[str]) -> None:
self._naming_scope = naming_scope
@property
def naming_scope(self) -> Optional[str]:
return self._naming_scope
@property
def context_graph(self):
return self._context_graph
@property
def unnest_only(self) -> bool:
return self._unnest_only
@property
def with_none_control_dependencies(self) -> bool:
return self._with_none_control_dependencies
@property
def composite_device_name(self) -> Any:
return self._composite_device_name
| InternalPlaceholderContext |
python | astropy__astropy | astropy/utils/masked/tests/test_containers.py | {
"start": 4399,
"end": 6191
} | class ____:
def setup_class(self):
self.s = np.array(
[
"2010-11-12T13:14:15.160",
"2010-11-12T13:14:15.161",
"2011-12-13T14:15:16.170",
]
)
self.t = Time(self.s)
# Time formats will currently strip any ndarray subtypes, so we cannot
# initialize a Time with a Masked version of self.s yet. Instead, we
# work around it, for now only testing that masked are preserved by
# transformations.
self.mask = np.array([False, False, True])
self.mt = self.t._apply(Masked, self.mask)
def test_initialization(self):
assert_array_equal(self.mt.jd1.mask, self.mask)
assert_array_equal(self.mt.jd2.mask, self.mask)
assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)
assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)
@pytest.mark.parametrize("format_", ["jd", "cxcsec", "jyear"])
def test_different_formats(self, format_):
# Formats do not yet work with everything; e.g., isot is not supported
# since the Masked class does not yet support structured arrays.
tfmt = getattr(self.t, format_)
mtfmt = getattr(self.mt, format_)
check = mtfmt == tfmt
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
@pytest.mark.parametrize("scale", ["tai", "tcb", "ut1"])
def test_transformation(self, scale):
tscl = getattr(self.t, scale)
mtscl = getattr(self.mt, scale)
assert_array_equal(mtscl.jd1.mask, self.mask)
assert_array_equal(mtscl.jd2.mask, self.mask)
assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)
assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)
| TestTime |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 55961,
"end": 56711
} | class ____(MaskedArraySetup):
def test_array_str(self):
# very blunt check they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
str(self.msc)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
assert self.msc[0].shape == ()
str(self.msc[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
repr(self.msc)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
repr(self.msc[0])
| TestMaskedArrayRepr |
python | pandas-dev__pandas | pandas/tests/indexing/multiindex/test_setitem.py | {
"start": 14826,
"end": 18434
} | class ____:
def test_setitem_new_column_mixed_depth(self):
arrays = [
["a", "top", "top", "routine1", "routine1", "routine2"],
["", "OD", "OD", "result1", "result2", "result1"],
["", "wx", "wy", "", "", ""],
]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index)
result = df.copy()
expected = df.copy()
result["b"] = [1, 2, 3, 4]
expected["b", "", ""] = [1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_setitem_new_column_all_na(self):
# GH#1534
mix = MultiIndex.from_tuples([("1a", "2a"), ("1a", "2b"), ("1a", "2c")])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df["new"] = s
assert df["new"].isna().all()
def test_setitem_enlargement_keep_index_names(self):
# GH#53053
mi = MultiIndex.from_tuples([(1, 2, 3)], names=["i1", "i2", "i3"])
df = DataFrame(data=[[10, 20, 30]], index=mi, columns=["A", "B", "C"])
df.loc[(0, 0, 0)] = df.loc[(1, 2, 3)]
mi_expected = MultiIndex.from_tuples(
[(1, 2, 3), (0, 0, 0)], names=["i1", "i2", "i3"]
)
expected = DataFrame(
data=[[10, 20, 30], [10, 20, 30]],
index=mi_expected,
columns=["A", "B", "C"],
)
tm.assert_frame_equal(df, expected)
def test_setitem_enlargement_multiindex_with_none(self):
# GH#59153
# enlarging a DataFrame with a MultiIndex containing None values
index = MultiIndex.from_tuples(
[("A", "a1"), ("A", "a2"), ("B", "b1"), ("B", None)]
)
df = DataFrame([(0.0, 6.0), (1.0, 5.0), (2.0, 4.0), (3.0, 7.0)], index=index)
df.loc[("A", None), :] = [12.0, 13.0]
expected_index = MultiIndex.from_tuples(
[("A", "a1"), ("A", "a2"), ("B", "b1"), ("B", None), ("A", None)]
)
expected = DataFrame(
[[0.0, 6.0], [1.0, 5.0], [2.0, 4.0], [3.0, 7.0], [12.0, 13.0]],
index=expected_index,
columns=[0, 1],
)
tm.assert_frame_equal(df, expected, check_index_type=False)
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
# really a no-no
df = multiindex_dataframe_random_data.T
with pytest.raises(ValueError, match="read-only"):
df["foo"].values[:] = 0
assert (df["foo"].values != 0).all()
def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
with tm.raises_chained_assignment_error():
df["foo"]["one"] = 2
def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
with tm.raises_chained_assignment_error():
df["foo"]["one"] = 2
tm.assert_frame_equal(df, expected)
def test_frame_setitem_partial_multiindex():
# GH 54875
df = DataFrame(
{
"a": [1, 2, 3],
"b": [3, 4, 5],
"c": 6,
"d": 7,
}
).set_index(["a", "b", "c"])
ser = Series(8, index=df.index.droplevel("c"))
result = df.copy()
result["d"] = ser
expected = df.copy()
expected["d"] = 8
tm.assert_frame_equal(result, expected)
| TestSetitemWithExpansionMultiIndex |
python | encode__django-rest-framework | tests/test_serializer.py | {
"start": 532,
"end": 1684
} | class ____:
def is_field(self, name, value):
return (
isinstance(value, type) and
issubclass(value, Field) and
not name.startswith('_')
)
def test_fields(self):
msg = "Expected `fields.%s` to be imported in `serializers`"
field_classes = [
key for key, value
in inspect.getmembers(fields)
if self.is_field(key, value)
]
# sanity check
assert 'Field' in field_classes
assert 'BooleanField' in field_classes
for field in field_classes:
assert hasattr(serializers, field), msg % field
def test_relations(self):
msg = "Expected `relations.%s` to be imported in `serializers`"
field_classes = [
key for key, value
in inspect.getmembers(relations)
if self.is_field(key, value)
]
# sanity check
assert 'RelatedField' in field_classes
for field in field_classes:
assert hasattr(serializers, field), msg % field
# Tests for core functionality.
# -----------------------------
| TestFieldImports |
python | numba__numba | numba/core/typing/templates.py | {
"start": 36311,
"end": 38790
} | class ____(_TemplateTargetHelperMixin, AbstractTemplate):
"""
A base class of templates for intrinsic definition
"""
def generic(self, args, kws):
"""
Type the intrinsic by the arguments.
"""
lower_builtin = self._get_target_registry('intrinsic').lower
cache_key = self.context, args, tuple(kws.items())
try:
return self._impl_cache[cache_key]
except KeyError:
pass
result = self._definition_func(self.context, *args, **kws)
if result is None:
return
[sig, imp] = result
pysig = utils.pysignature(self._definition_func)
# omit context argument from user function
parameters = list(pysig.parameters.values())[1:]
sig = sig.replace(pysig=pysig.replace(parameters=parameters))
self._impl_cache[cache_key] = sig
self._overload_cache[sig.args] = imp
# register the lowering
lower_builtin(imp, *sig.args)(imp)
return sig
def get_impl_key(self, sig):
"""
Return the key for looking up the implementation for the given
signature on the target context.
"""
return self._overload_cache[sig.args]
def get_template_info(self):
basepath = os.path.dirname(os.path.dirname(numba.__file__))
impl = self._definition_func
code, firstlineno, path = self.get_source_code_info(impl)
sig = str(utils.pysignature(impl))
info = {
'kind': "intrinsic",
'name': getattr(impl, '__qualname__', impl.__name__),
'sig': sig,
'filename': utils.safe_relpath(path, start=basepath),
'lines': (firstlineno, firstlineno + len(code) - 1),
'docstring': impl.__doc__
}
return info
def make_intrinsic_template(handle, defn, name, *, prefer_literal=False,
kwargs=None):
"""
Make a template class for a intrinsic handle *handle* defined by the
function *defn*. The *name* is used for naming the new template class.
"""
kwargs = MappingProxyType({} if kwargs is None else kwargs)
base = _IntrinsicTemplate
name = "_IntrinsicTemplate_%s" % (name)
dct = dict(key=handle, _definition_func=staticmethod(defn),
_impl_cache={}, _overload_cache={},
prefer_literal=prefer_literal, metadata=kwargs)
return type(base)(name, (base,), dct)
| _IntrinsicTemplate |
python | huggingface__transformers | src/transformers/models/xcodec/modeling_xcodec.py | {
"start": 9868,
"end": 12190
} | class ____(nn.Module):
"""
Residual vector quantization implementation. Follows Algorithm 1 in https://huggingface.co/papers/2107.03312
"""
def __init__(self, config: XcodecConfig):
super().__init__()
self.quantizers = nn.ModuleList([XcodecVectorQuantization(config) for _ in range(config.num_quantizers)])
self.frame_rate = config.frame_rate
self.codebook_size = config.codebook_size
self.num_quantizers = config.num_quantizers
def get_bandwidth_per_quantizer(self):
"""Return bandwidth per quantizer."""
return math.log2(self.codebook_size) * self.frame_rate / 1000
def get_num_quantizers_for_bandwidth(self, bandwidth=None) -> int:
"""Return num_quantizers based on specified target bandwidth."""
bw_per_q = self.get_bandwidth_per_quantizer()
num_quantizers = self.num_quantizers
if bandwidth is not None and bandwidth > 0.0:
num_quantizers = int(max(1, math.floor(bandwidth / bw_per_q)))
return num_quantizers
def encode(self, embeddings: torch.Tensor, bandwidth=None) -> torch.Tensor:
"""
Encode the input tensor into discrete indices using RVQ, with the number of quantizers selected based on the given bandwidth.
Each quantizer /codebook residually quantizes the input and returns the nearest indices in terms of Euclidian distance.
"""
num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
residual = embeddings
all_indices = []
for quantizer in self.quantizers[:num_quantizers]:
indices = quantizer.encode(residual)
quantized = quantizer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to their quantized representation."""
quantized_out = torch.tensor(0.0, device=codes.device)
for i, indices in enumerate(codes):
quantizer = self.quantizers[i]
quantized = quantizer.decode(indices)
quantized_out = quantized_out + quantized
return quantized_out
@auto_docstring
| XcodecResidualVectorQuantization |
python | doocs__leetcode | solution/2200-2299/2282.Number of People That Can Be Seen in a Grid/Solution.py | {
"start": 0,
"end": 809
} | class ____:
def seePeople(self, heights: List[List[int]]) -> List[List[int]]:
def f(nums: List[int]) -> List[int]:
n = len(nums)
stk = []
ans = [0] * n
for i in range(n - 1, -1, -1):
while stk and stk[-1] < nums[i]:
ans[i] += 1
stk.pop()
if stk:
ans[i] += 1
while stk and stk[-1] == nums[i]:
stk.pop()
stk.append(nums[i])
return ans
ans = [f(row) for row in heights]
m, n = len(heights), len(heights[0])
for j in range(n):
add = f([heights[i][j] for i in range(m)])
for i in range(m):
ans[i][j] += add[i]
return ans
| Solution |
python | apache__airflow | providers/apache/pig/src/airflow/providers/apache/pig/operators/pig.py | {
"start": 1113,
"end": 3039
} | class ____(BaseOperator):
"""
Executes pig script.
:param pig: the pig latin script to be executed. (templated)
:param pig_cli_conn_id: reference to the Hive database
:param pigparams_jinja_translate: when True, pig params-type templating
${var} gets translated into jinja-type templating {{ var }}. Note that
you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:param pig_opts: pig options, such as: -x tez, -useHCatalog, ... - space separated list
:param pig_properties: pig properties, additional pig properties passed as list
"""
template_fields: Sequence[str] = ("pig", "pig_opts", "pig_properties")
template_ext: Sequence[str] = (
".pig",
".piglatin",
)
ui_color = "#f0e4ec"
def __init__(
self,
*,
pig: str,
pig_cli_conn_id: str = "pig_cli_default",
pigparams_jinja_translate: bool = False,
pig_opts: str | None = None,
pig_properties: list[str] | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.pigparams_jinja_translate = pigparams_jinja_translate
self.pig = pig
self.pig_cli_conn_id = pig_cli_conn_id
self.pig_opts = pig_opts
self.pig_properties = pig_properties
self.hook: PigCliHook | None = None
def prepare_template(self):
if self.pigparams_jinja_translate:
self.pig = re.sub(r"(\$([a-zA-Z_][a-zA-Z0-9_]*))", r"{{ \g<2> }}", self.pig)
def execute(self, context: Context):
self.log.info("Executing: %s", self.pig)
self.hook = PigCliHook(pig_cli_conn_id=self.pig_cli_conn_id, pig_properties=self.pig_properties)
self.hook.run_cli(pig=self.pig, pig_opts=self.pig_opts)
def on_kill(self):
self.hook.kill()
| PigOperator |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_history.py | {
"start": 2390,
"end": 8113
} | class ____(IntegrationTestCase):
PANEL_KEYS = {
"VersionsPanel",
"TimerPanel",
"SettingsPanel",
"HeadersPanel",
"RequestPanel",
"SQLPanel",
"StaticFilesPanel",
"TemplatesPanel",
"AlertsPanel",
"CachePanel",
"SignalsPanel",
"CommunityPanel",
"ProfilingPanel",
}
def test_history_panel_integration_content(self):
"""Verify the history panel's content renders properly.."""
store = get_store()
self.assertEqual(len(list(store.request_ids())), 0)
data = {"foo": "bar"}
self.client.get("/json_view/", data, content_type="application/json")
# Check the history panel's stats to verify the toolbar rendered properly.
request_ids = list(store.request_ids())
self.assertEqual(len(request_ids), 1)
toolbar = DebugToolbar.fetch(request_ids[0])
content = toolbar.get_panel_by_id(HistoryPanel.panel_id).content
self.assertIn("bar", content)
self.assertIn('name="exclude_history" value="True"', content)
def test_history_sidebar_invalid(self):
response = self.client.get(reverse("djdt:history_sidebar"))
self.assertEqual(response.status_code, 400)
def test_history_headers(self):
"""Validate the headers injected from the history panel."""
DebugToolbar.get_observe_request.cache_clear()
response = self.client.get("/json_view/")
request_id = list(get_store().request_ids())[0]
self.assertEqual(response.headers["djdt-request-id"], request_id)
def test_history_headers_unobserved(self):
"""Validate the headers aren't injected from the history panel."""
with self.settings(
DEBUG_TOOLBAR_CONFIG={"OBSERVE_REQUEST_CALLBACK": lambda request: False}
):
DebugToolbar.get_observe_request.cache_clear()
response = self.client.get("/json_view/")
self.assertNotIn("djdt-request-id", response.headers)
# Clear it again to avoid conflicting with another test
# Specifically, DebugToolbarLiveTestCase.test_ajax_refresh
DebugToolbar.get_observe_request.cache_clear()
def test_history_sidebar(self):
"""Validate the history sidebar view."""
self.client.get("/json_view/")
request_id = list(get_store().request_ids())[0]
data = {"request_id": request_id, "exclude_history": True}
response = self.client.get(reverse("djdt:history_sidebar"), data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.json()),
self.PANEL_KEYS,
)
def test_history_sidebar_includes_history(self):
"""Validate the history sidebar view."""
self.client.get("/json_view/")
panel_keys = copy.copy(self.PANEL_KEYS)
panel_keys.add(HistoryPanel.panel_id)
panel_keys.add(RedirectsPanel.panel_id)
request_id = list(get_store().request_ids())[0]
data = {"request_id": request_id}
response = self.client.get(reverse("djdt:history_sidebar"), data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.json()),
panel_keys,
)
@override_settings(
DEBUG_TOOLBAR_CONFIG={"RENDER_PANELS": False, "RESULTS_CACHE_SIZE": 1}
)
def test_history_sidebar_expired_request_id(self):
"""Validate the history sidebar view."""
self.client.get("/json_view/")
request_id = list(get_store().request_ids())[0]
data = {"request_id": request_id, "exclude_history": True}
response = self.client.get(reverse("djdt:history_sidebar"), data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.json()),
self.PANEL_KEYS,
)
# Make enough requests to unset the original
self.client.get("/json_view/")
# Querying old request_id should return in empty response
data = {"request_id": request_id, "exclude_history": True}
response = self.client.get(reverse("djdt:history_sidebar"), data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
# Querying with latest request_id
latest_request_id = list(get_store().request_ids())[0]
data = {"request_id": latest_request_id, "exclude_history": True}
response = self.client.get(reverse("djdt:history_sidebar"), data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.json()),
self.PANEL_KEYS,
)
def test_history_refresh(self):
"""Verify refresh history response has request variables."""
self.client.get("/json_view/", {"foo": "bar"}, content_type="application/json")
self.client.get(
"/json_view/", {"spam": "eggs"}, content_type="application/json"
)
response = self.client.get(
reverse("djdt:history_refresh"), data={"request_id": "foo"}
)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(len(data["requests"]), 2)
request_ids = list(get_store().request_ids())
self.assertIn(html.escape(request_ids[0]), data["requests"][0]["content"])
self.assertIn(html.escape(request_ids[1]), data["requests"][1]["content"])
for val in ["foo", "bar"]:
self.assertIn(val, data["requests"][0]["content"])
for val in ["spam", "eggs"]:
self.assertIn(val, data["requests"][1]["content"])
| HistoryViewsTestCase |
python | arrow-py__arrow | arrow/locales.py | {
"start": 29498,
"end": 31272
} | class ____(SlavicBaseLocale):
names = ["be", "be-by"]
past = "{0} таму"
future = "праз {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "зараз",
"second": "секунду",
"seconds": "{0} некалькі секунд",
"minute": "хвіліну",
"minutes": {
"singular": "{0} хвіліну",
"dual": "{0} хвіліны",
"plural": "{0} хвілін",
},
"hour": "гадзіну",
"hours": {
"singular": "{0} гадзіну",
"dual": "{0} гадзіны",
"plural": "{0} гадзін",
},
"day": "дзень",
"days": {"singular": "{0} дзень", "dual": "{0} дні", "plural": "{0} дзён"},
"month": "месяц",
"months": {
"singular": "{0} месяц",
"dual": "{0} месяцы",
"plural": "{0} месяцаў",
},
"year": "год",
"years": {"singular": "{0} год", "dual": "{0} гады", "plural": "{0} гадоў"},
}
month_names = [
"",
"студзеня",
"лютага",
"сакавіка",
"красавіка",
"траўня",
"чэрвеня",
"ліпеня",
"жніўня",
"верасня",
"кастрычніка",
"лістапада",
"снежня",
]
month_abbreviations = [
"",
"студ",
"лют",
"сак",
"крас",
"трав",
"чэрв",
"ліп",
"жнів",
"вер",
"каст",
"ліст",
"снеж",
]
day_names = [
"",
"панядзелак",
"аўторак",
"серада",
"чацвер",
"пятніца",
"субота",
"нядзеля",
]
day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"]
| BelarusianLocale |
python | gevent__gevent | src/greentest/3.9/test_asyncore.py | {
"start": 25413,
"end": 25634
} | class ____(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
support.unlink(self.addr)
BaseTestAPI.tearDown(self)
| TestAPI_UseUnixSockets |
python | getsentry__sentry | src/sentry/analytics/events/first_profile_sent.py | {
"start": 75,
"end": 273
} | class ____(analytics.Event):
organization_id: int
project_id: int
platform: str | None = None
user_id: int | None = None
analytics.register(FirstProfileSentEvent)
| FirstProfileSentEvent |
python | python-pillow__Pillow | src/PIL/Jpeg2KImagePlugin.py | {
"start": 594,
"end": 7899
} | class ____:
"""
A small helper class to read fields stored in JPEG2000 header boxes
and to easily step into and read sub-boxes.
"""
def __init__(self, fp: IO[bytes], length: int = -1) -> None:
self.fp = fp
self.has_length = length >= 0
self.length = length
self.remaining_in_box = -1
def _can_read(self, num_bytes: int) -> bool:
if self.has_length and self.fp.tell() + num_bytes > self.length:
# Outside box: ensure we don't read past the known file length
return False
if self.remaining_in_box >= 0:
# Inside box contents: ensure read does not go past box boundaries
return num_bytes <= self.remaining_in_box
else:
return True # No length known, just read
def _read_bytes(self, num_bytes: int) -> bytes:
if not self._can_read(num_bytes):
msg = "Not enough data in header"
raise SyntaxError(msg)
data = self.fp.read(num_bytes)
if len(data) < num_bytes:
msg = f"Expected to read {num_bytes} bytes but only got {len(data)}."
raise OSError(msg)
if self.remaining_in_box > 0:
self.remaining_in_box -= num_bytes
return data
def read_fields(self, field_format: str) -> tuple[int | bytes, ...]:
size = struct.calcsize(field_format)
data = self._read_bytes(size)
return struct.unpack(field_format, data)
def read_boxes(self) -> BoxReader:
size = self.remaining_in_box
data = self._read_bytes(size)
return BoxReader(io.BytesIO(data), size)
def has_next_box(self) -> bool:
if self.has_length:
return self.fp.tell() + self.remaining_in_box < self.length
else:
return True
def next_box_type(self) -> bytes:
# Skip the rest of the box if it has not been read
if self.remaining_in_box > 0:
self.fp.seek(self.remaining_in_box, os.SEEK_CUR)
self.remaining_in_box = -1
# Read the length and type of the next box
lbox, tbox = cast(tuple[int, bytes], self.read_fields(">I4s"))
if lbox == 1:
lbox = cast(int, self.read_fields(">Q")[0])
hlen = 16
else:
hlen = 8
if lbox < hlen or not self._can_read(lbox - hlen):
msg = "Invalid header length"
raise SyntaxError(msg)
self.remaining_in_box = lbox - hlen
return tbox
def _parse_codestream(fp: IO[bytes]) -> tuple[tuple[int, int], str]:
"""Parse the JPEG 2000 codestream to extract the size and component
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
hdr = fp.read(2)
lsiz = _binary.i16be(hdr)
siz = hdr + fp.read(lsiz - 2)
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
">HHIIIIIIIIH", siz
)
size = (xsiz - xosiz, ysiz - yosiz)
if csiz == 1:
ssiz = struct.unpack_from(">B", siz, 38)
if (ssiz[0] & 0x7F) + 1 > 8:
mode = "I;16"
else:
mode = "L"
elif csiz == 2:
mode = "LA"
elif csiz == 3:
mode = "RGB"
elif csiz == 4:
mode = "RGBA"
else:
msg = "unable to determine J2K image mode"
raise SyntaxError(msg)
return size, mode
def _res_to_dpi(num: int, denom: int, exp: int) -> float | None:
"""Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution,
calculated as (num / denom) * 10^exp and stored in dots per meter,
to floating-point dots per inch."""
if denom == 0:
return None
return (254 * num * (10**exp)) / (10000 * denom)
def _parse_jp2_header(
fp: IO[bytes],
) -> tuple[
tuple[int, int],
str,
str | None,
tuple[float, float] | None,
ImagePalette.ImagePalette | None,
]:
"""Parse the JP2 header box to extract size, component count,
color space information, and optionally DPI information,
returning a (size, mode, mimetype, dpi) tuple."""
# Find the JP2 header box
reader = BoxReader(fp)
header = None
mimetype = None
while reader.has_next_box():
tbox = reader.next_box_type()
if tbox == b"jp2h":
header = reader.read_boxes()
break
elif tbox == b"ftyp":
if reader.read_fields(">4s")[0] == b"jpx ":
mimetype = "image/jpx"
assert header is not None
size = None
mode = None
bpc = None
nc = None
dpi = None # 2-tuple of DPI info, or None
palette = None
while header.has_next_box():
tbox = header.next_box_type()
if tbox == b"ihdr":
height, width, nc, bpc = header.read_fields(">IIHB")
assert isinstance(height, int)
assert isinstance(width, int)
assert isinstance(bpc, int)
size = (width, height)
if nc == 1 and (bpc & 0x7F) > 8:
mode = "I;16"
elif nc == 1:
mode = "L"
elif nc == 2:
mode = "LA"
elif nc == 3:
mode = "RGB"
elif nc == 4:
mode = "RGBA"
elif tbox == b"colr" and nc == 4:
meth, _, _, enumcs = header.read_fields(">BBBI")
if meth == 1 and enumcs == 12:
mode = "CMYK"
elif tbox == b"pclr" and mode in ("L", "LA"):
ne, npc = header.read_fields(">HB")
assert isinstance(ne, int)
assert isinstance(npc, int)
max_bitdepth = 0
for bitdepth in header.read_fields(">" + ("B" * npc)):
assert isinstance(bitdepth, int)
if bitdepth > max_bitdepth:
max_bitdepth = bitdepth
if max_bitdepth <= 8:
palette = ImagePalette.ImagePalette("RGBA" if npc == 4 else "RGB")
for i in range(ne):
color: list[int] = []
for value in header.read_fields(">" + ("B" * npc)):
assert isinstance(value, int)
color.append(value)
palette.getcolor(tuple(color))
mode = "P" if mode == "L" else "PA"
elif tbox == b"res ":
res = header.read_boxes()
while res.has_next_box():
tres = res.next_box_type()
if tres == b"resc":
vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB")
assert isinstance(vrcn, int)
assert isinstance(vrcd, int)
assert isinstance(hrcn, int)
assert isinstance(hrcd, int)
assert isinstance(vrce, int)
assert isinstance(hrce, int)
hres = _res_to_dpi(hrcn, hrcd, hrce)
vres = _res_to_dpi(vrcn, vrcd, vrce)
if hres is not None and vres is not None:
dpi = (hres, vres)
break
if size is None or mode is None:
msg = "Malformed JP2 header"
raise SyntaxError(msg)
return size, mode, mimetype, dpi, palette
##
# Image plugin for JPEG2000 images.
| BoxReader |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 22518,
"end": 23193
} | class ____(ApplyConcatApply):
_parameters = ["frame", "columns", "index", "values", "aggfunc"]
_defaults = {"columns": None, "index": None, "values": None, "aggfunc": "mean"}
@property
def chunk_kwargs(self):
return {
"index": self.operand("index"),
"columns": self.operand("columns"),
"values": self.operand("values"),
}
@classmethod
def combine(cls, inputs: list, **kwargs):
return _concat(inputs)
@classmethod
def aggregate(cls, inputs: list, **kwargs):
df = _concat(inputs)
return cls.aggregate_func(df, **kwargs) # type: ignore[attr-defined]
| PivotTableAbstract |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 99261,
"end": 99863
} | class ____(ClosureScope):
is_generator_expression_scope = True
def declare_assignment_expression_target(self, name, type, pos):
entry = self.parent_scope.declare_var(name, type, pos)
return self._create_inner_entry_for_closure(name, entry)
def lookup_assignment_expression_target(self, name):
entry = self.lookup_here(name)
if not entry:
entry = self.parent_scope.lookup_assignment_expression_target(name)
if entry:
return self._create_inner_entry_for_closure(name, entry)
return entry
| GeneratorExpressionScope |
python | dask__distributed | distributed/deploy/spec.py | {
"start": 3667,
"end": 24294
} | class ____(Cluster):
"""Cluster that requires a full specification of workers
The SpecCluster class expects a full specification of the Scheduler and
Workers to use. It removes any handling of user inputs (like threads vs
processes, number of cores, and so on) and any handling of cluster resource
managers (like pods, jobs, and so on). Instead, it expects this
information to be passed in scheduler and worker specifications. This
class does handle all of the logic around asynchronously cleanly setting up
and tearing things down at the right times. Hopefully it can form a base
for other more user-centric classes.
Parameters
----------
workers: dict
A dictionary mapping names to worker classes and their specifications
See example below
scheduler: dict, optional
A similar mapping for a scheduler
worker: dict
A specification of a single worker.
This is used for any new workers that are created.
asynchronous: bool
If this is intended to be used directly within an event loop with
async/await
silence_logs: bool
Whether or not we should silence logging when setting up the cluster.
name: str, optional
A name to use when printing out the cluster, defaults to type name
shutdown_on_close: bool
Whether or not to close the cluster when the program exits
shutdown_scheduler: bool
Whether or not to shut down the scheduler when the cluster is closed
Examples
--------
To create a SpecCluster you specify how to set up a Scheduler and Workers
>>> from dask.distributed import Scheduler, Worker, Nanny
>>> scheduler = {'cls': Scheduler, 'options': {"dashboard_address": ':8787'}}
>>> workers = {
... 'my-worker': {"cls": Worker, "options": {"nthreads": 1}},
... 'my-nanny': {"cls": Nanny, "options": {"nthreads": 2}},
... }
>>> cluster = SpecCluster(scheduler=scheduler, workers=workers)
The worker spec is stored as the ``.worker_spec`` attribute
>>> cluster.worker_spec
{
'my-worker': {"cls": Worker, "options": {"nthreads": 1}},
'my-nanny': {"cls": Nanny, "options": {"nthreads": 2}},
}
While the instantiation of this spec is stored in the ``.workers``
attribute
>>> cluster.workers
{
'my-worker': <Worker ...>
'my-nanny': <Nanny ...>
}
Should the spec change, we can await the cluster or call the
``._correct_state`` method to align the actual state to the specified
state.
We can also ``.scale(...)`` the cluster, which adds new workers of a given
form.
>>> worker = {'cls': Worker, 'options': {}}
>>> cluster = SpecCluster(scheduler=scheduler, worker=worker)
>>> cluster.worker_spec
{}
>>> cluster.scale(3)
>>> cluster.worker_spec
{
0: {'cls': Worker, 'options': {}},
1: {'cls': Worker, 'options': {}},
2: {'cls': Worker, 'options': {}},
}
Note that above we are using the standard ``Worker`` and ``Nanny`` classes,
however in practice other classes could be used that handle resource
management like ``KubernetesPod`` or ``SLURMJob``. The spec does not need
to conform to the expectations of the standard Dask Worker class. It just
needs to be called with the provided options, support ``__await__`` and
``close`` methods and the ``worker_address`` property..
Also note that uniformity of the specification is not required. Other API
could be added externally (in subclasses) that adds workers of different
specifications into the same dictionary.
If a single entry in the spec will generate multiple dask workers then
please provide a `"group"` element to the spec, that includes the suffixes
that will be added to each name (this should be handled by your worker
class).
>>> cluster.worker_spec
{
0: {"cls": MultiWorker, "options": {"processes": 3}, "group": ["-0", "-1", -2"]}
1: {"cls": MultiWorker, "options": {"processes": 2}, "group": ["-0", "-1"]}
}
These suffixes should correspond to the names used by the workers when
they deploy.
>>> [ws.name for ws in cluster.scheduler.workers.values()]
["0-0", "0-1", "0-2", "1-0", "1-1"]
"""
_instances: ClassVar[weakref.WeakSet[SpecCluster]] = weakref.WeakSet()
def __init__(
self,
workers=None,
scheduler=None,
worker=None,
asynchronous=False,
loop=None,
security=None,
silence_logs=False,
name=None,
shutdown_on_close=True,
scheduler_sync_interval=1,
shutdown_scheduler=True,
):
if loop is None and asynchronous:
loop = IOLoop.current()
self.__exit_stack = stack = contextlib.ExitStack()
self._created = weakref.WeakSet()
self.scheduler_spec = copy.copy(scheduler)
self.worker_spec = copy.copy(workers) or {}
self.new_spec = copy.copy(worker)
self.scheduler = None
self.workers = {}
self._i = 0
self.security = security or Security()
self._futures = set()
if silence_logs:
stack.enter_context(silence_logging_cmgr(level=silence_logs))
stack.enter_context(silence_logging_cmgr(level=silence_logs, root="bokeh"))
self._instances.add(self)
self._correct_state_waiting = None
self._name = name or type(self).__name__
self.shutdown_on_close = shutdown_on_close
self.shutdown_scheduler = shutdown_scheduler
super().__init__(
asynchronous=asynchronous,
loop=loop,
name=name,
scheduler_sync_interval=scheduler_sync_interval,
)
if not self.called_from_running_loop:
self._loop_runner.start()
self.sync(self._start)
try:
self.sync(self._correct_state)
except Exception:
self.sync(self.close)
self._loop_runner.stop()
raise
def close(self, timeout: float | None = None) -> Awaitable[None] | None:
aw = super().close(timeout)
if not self.asynchronous:
self._loop_runner.stop()
return aw
async def _start(self):
while self.status == Status.starting:
await asyncio.sleep(0.01)
if self.status == Status.running:
return
if self.status == Status.closed:
raise ValueError("Cluster is closed")
self._lock = asyncio.Lock()
self.status = Status.starting
if self.scheduler_spec is None:
try:
import distributed.dashboard # noqa: F401
except ImportError:
pass
else:
options = {"dashboard": True}
self.scheduler_spec = {"cls": Scheduler, "options": options}
try:
# Check if scheduler has already been created by a subclass
if self.scheduler is None:
cls = self.scheduler_spec["cls"]
if isinstance(cls, str):
cls = import_term(cls)
self.scheduler = cls(**self.scheduler_spec.get("options", {}))
self.scheduler = await self.scheduler
self.scheduler_comm = rpc(
getattr(self.scheduler, "external_address", None)
or self.scheduler.address,
connection_args=self.security.get_connection_args("client"),
)
await super()._start()
except Exception as e: # pragma: no cover
self.status = Status.failed
await self._close()
raise RuntimeError(f"Cluster failed to start: {e}") from e
def _correct_state(self):
if self._correct_state_waiting:
# If people call this frequently, we only want to run it once
return self._correct_state_waiting
else:
task = asyncio.ensure_future(self._correct_state_internal())
self._correct_state_waiting = task
return task
async def _correct_state_internal(self) -> None:
async with self._lock:
self._correct_state_waiting = None
to_close = set(self.workers) - set(self.worker_spec)
if to_close:
if self.scheduler.status == Status.running:
await self.scheduler_comm.retire_workers(workers=list(to_close))
tasks = [
asyncio.create_task(self.workers[w].close())
for w in to_close
if w in self.workers
]
await asyncio.gather(*tasks)
for name in to_close:
if name in self.workers:
del self.workers[name]
to_open = set(self.worker_spec) - set(self.workers)
workers = []
for name in to_open:
d = self.worker_spec[name]
cls, opts = d["cls"], d.get("options", {})
if "name" not in opts:
opts = opts.copy()
opts["name"] = name
if isinstance(cls, str):
cls = import_term(cls)
worker = cls(
getattr(self.scheduler, "contact_address", None)
or self.scheduler.address,
**opts,
)
self._created.add(worker)
workers.append(worker)
if workers:
worker_futs = [asyncio.ensure_future(w) for w in workers]
await asyncio.wait(worker_futs)
self.workers.update(dict(zip(to_open, workers)))
for w in workers:
w._cluster = weakref.ref(self)
# Collect exceptions from failed workers. This must happen after all
# *other* workers have finished initialising, so that we can have a
# proper teardown.
await asyncio.gather(*worker_futs)
def _update_worker_status(self, op, msg):
if op == "remove":
name = self.scheduler_info["workers"][msg]["name"]
def f():
if (
name in self.workers
and msg not in self.scheduler_info["workers"]
and not any(
d["name"] == name
for d in self.scheduler_info["workers"].values()
)
):
self._futures.add(asyncio.ensure_future(self.workers[name].close()))
del self.workers[name]
delay = parse_timedelta(
dask.config.get("distributed.deploy.lost-worker-timeout")
)
asyncio.get_running_loop().call_later(delay, f)
super()._update_worker_status(op, msg)
def __await__(self: Self) -> Generator[Any, Any, Self]:
async def _() -> Self:
if self.status == Status.created:
await self._start()
await self.scheduler
await self._correct_state()
if self.workers:
await asyncio.wait(
[
asyncio.create_task(_wrap_awaitable(w))
for w in self.workers.values()
]
) # maybe there are more
return self
return _().__await__()
async def _close(self):
while self.status == Status.closing:
await asyncio.sleep(0.1)
if self.status == Status.closed:
return
if self.status == Status.running or self.status == Status.failed:
self.status = Status.closing
# Need to call stop here before we close all servers to avoid having
# dangling tasks in the ioloop
with suppress(AttributeError):
self._adaptive.stop()
f = self.scale(0)
if isawaitable(f):
await f
await self._correct_state()
await asyncio.gather(*self._futures)
if self.scheduler_comm:
async with self._lock:
if self.shutdown_scheduler:
with suppress(OSError):
await self.scheduler_comm.terminate()
await self.scheduler_comm.close_rpc()
else:
logger.warning("Cluster closed without starting up")
if self.scheduler and self.shutdown_scheduler:
await self.scheduler.close()
for w in self._created:
assert w.status in {
Status.closing,
Status.closed,
Status.failed,
}, w.status
self.__exit_stack.__exit__(None, None, None)
await super()._close()
async def __aenter__(self):
try:
await self
await self._correct_state()
assert self.status == Status.running
return self
except Exception:
await self._close()
raise
def _threads_per_worker(self) -> int:
"""Return the number of threads per worker for new workers"""
if not self.new_spec: # pragma: no cover
raise ValueError("To scale by cores= you must specify cores per worker")
for name in ["nthreads", "ncores", "threads", "cores"]:
with suppress(KeyError):
return self.new_spec["options"][name]
raise RuntimeError("unreachable")
def _memory_per_worker(self) -> int:
"""Return the memory limit per worker for new workers"""
if not self.new_spec: # pragma: no cover
raise ValueError(
"to scale by memory= your worker definition must include a "
"memory_limit definition"
)
for name in ["memory_limit", "memory"]:
with suppress(KeyError):
return parse_bytes(self.new_spec["options"][name])
raise ValueError(
"to use scale(memory=...) your worker definition must include a "
"memory_limit definition"
)
def scale(self, n=0, memory=None, cores=None):
if memory is not None:
n = max(n, math.ceil(parse_bytes(memory) / self._memory_per_worker()))
if cores is not None:
n = max(n, math.ceil(cores / self._threads_per_worker()))
if len(self.worker_spec) > n:
not_yet_launched = set(self.worker_spec) - {
v["name"] for v in self.scheduler_info["workers"].values()
}
while len(self.worker_spec) > n and not_yet_launched:
del self.worker_spec[not_yet_launched.pop()]
while len(self.worker_spec) > n:
self.worker_spec.popitem()
if self.status not in (Status.closing, Status.closed):
while len(self.worker_spec) < n:
self.worker_spec.update(self.new_worker_spec())
self.loop.add_callback(self._correct_state)
if self.asynchronous:
return NoOpAwaitable()
def _new_worker_name(self, worker_number):
"""Returns new worker name.
This can be overridden in SpecCluster derived classes to customise the
worker names.
"""
return worker_number
def new_worker_spec(self):
"""Return name and spec for the next worker
Returns
-------
d: dict mapping names to worker specs
See Also
--------
scale
"""
new_worker_name = self._new_worker_name(self._i)
while new_worker_name in self.worker_spec:
self._i += 1
new_worker_name = self._new_worker_name(self._i)
return {new_worker_name: self.new_spec}
@property
def _supports_scaling(self):
return bool(self.new_spec)
async def scale_down(self, workers):
# We may have groups, if so, map worker addresses to job names
if not all(w in self.worker_spec for w in workers):
mapping = {}
for name, spec in self.worker_spec.items():
if "group" in spec:
for suffix in spec["group"]:
mapping[str(name) + suffix] = name
else:
mapping[name] = name
workers = {mapping.get(w, w) for w in workers}
for w in workers:
if w in self.worker_spec:
del self.worker_spec[w]
await self
scale_up = scale # backwards compatibility
@property
def plan(self):
out = set()
for name, spec in self.worker_spec.items():
if "group" in spec:
out.update({str(name) + suffix for suffix in spec["group"]})
else:
out.add(name)
return out
@property
def requested(self):
out = set()
for name in self.workers:
try:
spec = self.worker_spec[name]
except KeyError:
continue
if "group" in spec:
out.update({str(name) + suffix for suffix in spec["group"]})
else:
out.add(name)
return out
def adapt(
self,
Adaptive: type[Adaptive] = Adaptive,
minimum: float = 0,
maximum: float = math.inf,
minimum_cores: int | None = None,
maximum_cores: int | None = None,
minimum_memory: str | None = None,
maximum_memory: str | None = None,
**kwargs: Any,
) -> Adaptive:
"""Turn on adaptivity
This scales Dask clusters automatically based on scheduler activity.
Parameters
----------
minimum : int
Minimum number of workers
maximum : int
Maximum number of workers
minimum_cores : int
Minimum number of cores/threads to keep around in the cluster
maximum_cores : int
Maximum number of cores/threads to keep around in the cluster
minimum_memory : str
Minimum amount of memory to keep around in the cluster
Expressed as a string like "100 GiB"
maximum_memory : str
Maximum amount of memory to keep around in the cluster
Expressed as a string like "100 GiB"
Examples
--------
>>> cluster.adapt(minimum=0, maximum_memory="100 GiB", interval='500ms')
See Also
--------
dask.distributed.Adaptive : for more keyword arguments
"""
if minimum_cores is not None:
minimum = max(
minimum or 0, math.ceil(minimum_cores / self._threads_per_worker())
)
if minimum_memory is not None:
minimum = max(
minimum or 0,
math.ceil(parse_bytes(minimum_memory) / self._memory_per_worker()),
)
if maximum_cores is not None:
maximum = min(
maximum, math.floor(maximum_cores / self._threads_per_worker())
)
if maximum_memory is not None:
maximum = min(
maximum,
math.floor(parse_bytes(maximum_memory) / self._memory_per_worker()),
)
return super().adapt(
Adaptive=Adaptive, minimum=minimum, maximum=maximum, **kwargs
)
@classmethod
def from_name(cls, name: str) -> ProcessInterface:
"""Create an instance of this class to represent an existing cluster by name."""
raise NotImplementedError()
def init_spec(spec: dict[str, Any], *args: Any) -> dict[str, Worker | Nanny]:
workers = {}
for k, d in spec.items():
cls = d["cls"]
if isinstance(cls, str):
cls = import_term(cls)
workers[k] = cls(*args, **d.get("opts", {}))
return workers
async def run_spec(spec: dict[str, Any], *args: Any) -> dict[str, Worker | Nanny]:
workers = init_spec(spec, *args)
if workers:
await asyncio.gather(*workers.values())
return workers
@atexit.register
def close_clusters():
for cluster in list(SpecCluster._instances):
if getattr(cluster, "shutdown_on_close", False):
with suppress(gen.TimeoutError, TimeoutError):
if getattr(cluster, "status", Status.closed) != Status.closed:
cluster.close(timeout=10)
| SpecCluster |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 28472,
"end": 29610
} | class ____:
def test_mixed_structured_unstructured_data(self):
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker)
unstructured_secret = "this_is_a_secret_pattern"
secrets_masker.add_mask(unstructured_secret)
mixed_data = {
"normal_field": "normal_value",
"password": "short_pw",
"description": f"Text containing {unstructured_secret} that should be masked",
"nested": {"token": "tk", "info": "No secrets here"},
}
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
redacted_data = redact(mixed_data)
assert redacted_data["normal_field"] == "normal_value"
assert redacted_data["password"] == "***"
assert unstructured_secret not in redacted_data["description"]
assert "***" in redacted_data["description"]
assert redacted_data["nested"]["token"] == "***"
assert redacted_data["nested"]["info"] == "No secrets here"
| TestMixedDataScenarios |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 26552,
"end": 27830
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
token ids which have their past given to this model should not be passed as `input_ids` as they have
already been computed.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
mems: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`XLNetForSequenceClassification`].
"""
)
| XLNetLMHeadModelOutput |
python | nedbat__coveragepy | tests/helpers.py | {
"start": 11021,
"end": 12047
} | class ____:
"""A proxy for another object, but one method will fail a few times before working."""
def __init__(self, obj: Any, methname: str, fails: list[Exception]) -> None:
"""Create the failing proxy.
`obj` is the object to proxy. `methname` is the method that will fail
a few times. `fails` are the exceptions to fail with. Once used up,
the method will proxy correctly.
"""
self.obj = obj
self.methname = methname
self.fails = fails
def __getattr__(self, name: str) -> Any:
if name == self.methname and self.fails:
meth = self._make_failing_method(self.fails[0])
del self.fails[0]
else:
meth = getattr(self.obj, name)
return meth
def _make_failing_method(self, exc: Exception) -> Callable[..., NoReturn]:
"""Return a function that will raise `exc`."""
def _meth(*args: Any, **kwargs: Any) -> NoReturn:
raise exc
return _meth
| FailingProxy |
python | psf__requests | src/requests/cookies.py | {
"start": 438,
"end": 2894
} | class ____:
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `http.cookiejar.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get("Host"):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers["Host"], encoding="utf-8")
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse(
[
parsed.scheme,
host,
parsed.path,
parsed.params,
parsed.query,
parsed.fragment,
]
)
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookiejar has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError(
"Cookie headers should be added with add_unredirected_header()"
)
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
| MockRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict21.py | {
"start": 294,
"end": 343
} | class ____(TypedDict):
v1: NotRequired[int]
| TD3 |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/argmax_op_test.py | {
"start": 1038,
"end": 5243
} | class ____(test.TestCase, parameterized.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
# Compilation time is too large with XLA/CPU autojit.
if not test_util.is_xla_enabled():
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.arange(200, dtype=np.float32).astype(dtype)
np.random.shuffle(x)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testTieBreaking(self, dtype):
x = np.zeros(200, dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis for
# breaking ties.
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
# Check that argmin and argmax match numpy along axis=1 for
# breaking ties.
x = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [0, 1, 0, 1]], dtype=dtype)
self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))
self._testBothArg(math_ops.argmin, x, 1, x.argmin(axis=1))
def _testDim(self, dtype):
shape = (3, 2, 4, 5, 6, 3, 7)
x = np.arange(
functools.reduce(lambda x, y: x * y, shape),
dtype=np.float32).astype(dtype)
np.random.shuffle(x)
x = x.reshape(shape)
# Check that argmin and argmax match numpy along all axes
for axis in range(-7, 7):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
@parameterized.parameters(np.float16, np.float32, np.float64, np.int16,
np.int32, np.int64, np.bool_,
dtypes.bfloat16.as_numpy_dtype)
def testTypes(self, dtype):
self._testBasic(dtype,)
self._testTieBreaking(dtype)
self._testDim(dtype)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session():
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session():
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
| ArgMaxTest |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_sum_to_be_between.py | {
"start": 2619,
"end": 15934
} | class ____(ColumnAggregateExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnSumToBeBetween is a \
Column Aggregate Expectation.
Column Aggregate Expectations are one of the most common types of Expectation.
They are evaluated for a single column, and produce an aggregate Metric, such as a mean, standard deviation, number of unique values, column type, etc.
If that Metric meets the conditions you set, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
min_value (comparable type or None): \
{MIN_VALUE_DESCRIPTION}
max_value (comparable type or None): \
{MAX_VALUE_DESCRIPTION}
strict_min (boolean): \
{STRICT_MIN_DESCRIPTION} default=False.
strict_max (boolean): \
{STRICT_MAX_DESCRIPTION} default=False.
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
* observed_value field in the result object is customized for this expectation to be a list \
representing the actual column sum
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 1
1 1.3 7
2 .8 2.5
3 2 3
Code Examples:
Passing Case:
Input:
ExpectColumnSumToBeBetween(
column="test",
min_value=2,
max_value=6
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 5.1
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnSumToBeBetween(
column="test2",
min_value=2,
max_value=6
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 13.5
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
min_value: Optional[Comparable] = pydantic.Field(
default=None, description=MIN_VALUE_DESCRIPTION
)
max_value: Optional[Comparable] = pydantic.Field(
default=None, description=MAX_VALUE_DESCRIPTION
)
strict_min: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MIN_DESCRIPTION
)
strict_max: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MAX_DESCRIPTION
)
# This dictionary contains metadata for display in the public gallery
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP
metric_dependencies = ("column.sum",)
success_keys = (
"min_value",
"strict_min",
"max_value",
"strict_max",
)
args_keys = (
"column",
"min_value",
"max_value",
"strict_min",
"strict_max",
)
class Config:
title = "Expect column sum to be between"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ExpectColumnSumToBeBetween]) -> None:
ColumnAggregateExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
""" A Column Map Metric Decorator for the Sum"""
@classmethod
@override
def _prescriptive_template(
cls, renderer_configuration: RendererConfiguration
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.min_value and not params.max_value:
template_str = "sum may have any numerical value."
else:
at_least_str = "greater than or equal to"
if params.strict_min:
at_least_str = cls._get_strict_min_string(
renderer_configuration=renderer_configuration
)
at_most_str = "less than or equal to"
if params.strict_max:
at_most_str = cls._get_strict_max_string(
renderer_configuration=renderer_configuration
)
if params.min_value and params.max_value:
template_str = (
f"sum must be {at_least_str} $min_value and {at_most_str} $max_value."
)
elif not params.min_value:
template_str = f"sum must be {at_most_str} $max_value."
else:
template_str = f"sum must be {at_least_str} $min_value."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
@override
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> list[RenderedStringTemplateContent]:
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
params = substitute_none_for_missing(
renderer_configuration.kwargs,
[
"column",
"min_value",
"max_value",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "sum may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = (
f"sum must be {at_least_str} $min_value and {at_most_str} $max_value."
)
elif params["min_value"] is None:
template_str = f"sum must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"sum must be {at_least_str} $min_value."
else:
raise ValueError("unresolvable template_str") # noqa: TRY003 # FIXME CoP
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
styling = runtime_configuration.get("styling") if runtime_configuration else None
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={"template": template_str, "params": params, "styling": styling},
)
]
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
return self._validate_metric_value_between(
metric_name="column.sum",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| ExpectColumnSumToBeBetween |
python | pytorch__pytorch | torch/amp/grad_scaler.py | {
"start": 1344,
"end": 1536
} | class ____(Enum):
READY = 0
UNSCALED = 1
STEPPED = 2
def _refresh_per_optimizer_state() -> dict[str, Any]:
return {"stage": OptState.READY, "found_inf_per_device": {}}
| OptState |
python | django__django | django/db/models/fields/files.py | {
"start": 15429,
"end": 20186
} | class ____(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(
self,
verbose_name=None,
name=None,
width_field=None,
height_field=None,
**kwargs,
):
self.width_field, self.height_field = width_field, height_field
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_image_library_installed(),
]
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
"Cannot use ImageField because Pillow is not installed.",
hint=(
"Get Pillow at https://pypi.org/project/Pillow/ "
'or run command "python -m pip install Pillow".'
),
obj=self,
id="fields.E210",
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.width_field:
kwargs["width_field"] = self.width_field
if self.height_field:
kwargs["height_field"] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
# with width_field/height_field.
if not cls._meta.abstract and (self.width_field or self.height_field):
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields or if
# the field is deferred.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields or self.attname not in instance.__dict__:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not (
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.ImageField,
**kwargs,
}
)
| ImageField |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 10555,
"end": 10666
} | class ____(Message):
message = "'...' %% ... `*` specifier requires sequence"
| PercentFormatStarRequiresSequence |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 6256,
"end": 6344
} | class ____(ServerTimeoutError):
"""Connection timeout error."""
| ConnectionTimeoutError |
python | MTrajK__coding-problems | Linked Lists/merge_k_sorted_ll.py | {
"start": 725,
"end": 906
} | class ____:
def __init__(self, node):
self.val = node.val
self.node = node
def __lt__(self, other):
return self.val < other.val
# priority queue
| PQNode |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/testing/codegen.py | {
"start": 1996,
"end": 2160
} | class ____(NodeSampler):
sample_map = dict((
('new', 1),
('existing', 1),
))
N_CONTROLFLOW_STATEMENTS = 10
N_FUNCTIONDEF_STATEMENTS = 10
| NameSampler |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 4205,
"end": 6309
} | class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def curve(self) -> EllipticCurve:
"""
The EllipticCurve that this key is on.
"""
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
Bit size of a secret scalar for the curve.
"""
@abc.abstractmethod
def public_numbers(self) -> EllipticCurvePublicNumbers:
"""
Returns an EllipticCurvePublicNumbers.
"""
@abc.abstractmethod
def public_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PublicFormat,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def verify(
self,
signature: utils.Buffer,
data: utils.Buffer,
signature_algorithm: EllipticCurveSignatureAlgorithm,
) -> None:
"""
Verifies the signature of the data.
"""
@classmethod
def from_encoded_point(
cls, curve: EllipticCurve, data: bytes
) -> EllipticCurvePublicKey:
utils._check_bytes("data", data)
if len(data) == 0:
raise ValueError("data must not be an empty byte string")
if data[0] not in [0x02, 0x03, 0x04]:
raise ValueError("Unsupported elliptic curve point type")
return rust_openssl.ec.from_public_bytes(curve, data)
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __copy__(self) -> EllipticCurvePublicKey:
"""
Returns a copy.
"""
@abc.abstractmethod
def __deepcopy__(self, memo: dict) -> EllipticCurvePublicKey:
"""
Returns a deep copy.
"""
EllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey
EllipticCurvePublicKey.register(rust_openssl.ec.ECPublicKey)
EllipticCurvePrivateNumbers = rust_openssl.ec.EllipticCurvePrivateNumbers
EllipticCurvePublicNumbers = rust_openssl.ec.EllipticCurvePublicNumbers
| EllipticCurvePublicKey |
python | doocs__leetcode | solution/0000-0099/0002.Add Two Numbers/Solution.py | {
"start": 151,
"end": 655
} | class ____:
def addTwoNumbers(
self, l1: Optional[ListNode], l2: Optional[ListNode]
) -> Optional[ListNode]:
dummy = ListNode()
carry, curr = 0, dummy
while l1 or l2 or carry:
s = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry
carry, val = divmod(s, 10)
curr.next = ListNode(val)
curr = curr.next
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
return dummy.next
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/named_tuples.py | {
"start": 948,
"end": 1776
} | class ____(MyNamedTuple):
pass
def inherited_tuple():
return InheritedNamedTuple(bad=_test_source(), benign=1)
def issue_with_inherited_named_tuple():
a = inherited_tuple()
_test_sink(a.bad)
def no_issue_with_benign_in_inherited_named_tuple():
a = inherited_tuple()
_test_sink(a.benign)
def aliased_indicies_forward():
a = tainted_tuple()
_test_sink(a[0]) # No issue
_test_sink(a[1]) # Issue here
_test_sink(a[2]) # Invalid attribute access
def aliased_indicies_forward_unknown_attribute(i: int):
a = tainted_tuple()
return a[i] # Unknown attribute access
def aliased_indicies_backward(a: MyNamedTuple):
_test_sink(a.benign)
_test_sink(a[1])
def aliased_indicies_backward_unknown_attribute(a: MyNamedTuple, i: int):
_test_sink(a[i])
| InheritedNamedTuple |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail_baseConfig.py | {
"start": 2855,
"end": 3059
} | class ____(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
class Config:
validate_by_name = True
DynamicAliasModel2(y='y', z=1)
DynamicAliasModel2(x='y', z=1)
| DynamicAliasModel2 |
python | wandb__wandb | wandb/sdk/lib/fsm.py | {
"start": 2623,
"end": 3045
} | class ____(Generic[T_FsmInputs, T_FsmContext]):
condition: Callable[[T_FsmInputs], bool]
target_state: Type[FsmState[T_FsmInputs, T_FsmContext]]
action: Optional[Callable[[T_FsmInputs], None]] = None
FsmTableWithContext: TypeAlias = Dict[
Type[FsmState[T_FsmInputs, T_FsmContext]],
Sequence[FsmEntry[T_FsmInputs, T_FsmContext]],
]
FsmTable: TypeAlias = FsmTableWithContext[T_FsmInputs, None]
| FsmEntry |
python | huggingface__transformers | tests/models/vit/test_modeling_vit.py | {
"start": 9451,
"end": 12405
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[4.2325, 4.3882, -6.6678], [4.5372, 1.8933, -6.7355], [4.4454, 0.8514, -5.8747]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
@slow
@require_accelerate
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
r"""
A small test to make sure that inference work in half precision without any problem.
"""
model = ViTModel.from_pretrained("facebook/dino-vits8", dtype=torch.float16, device_map="auto")
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
| ViTModelIntegrationTest |
python | getlogbook__logbook | src/logbook/queues.py | {
"start": 4046,
"end": 5756
} | class ____(Handler):
"""A handler that acts as a message queue publisher, which publishes each
record as json dump. Requires the kombu module.
The queue will be filled with JSON exported log records. To receive such
log records from a queue you can use the :class:`MessageQueueSubscriber`.
For an AMQP backend such as RabbitMQ::
handler = MessageQueueHandler("amqp://guest:guest@localhost//")
This requires the py-amqp or the librabbitmq client library.
For Redis (requires redis client library)::
handler = MessageQueueHandler("redis://localhost:8889/0")
For MongoDB (requires pymongo)::
handler = MessageQueueHandler("mongodb://localhost:27017/logging")
Several other backends are also supported.
Refer to the `kombu`_ documentation
.. _kombu: https://docs.celeryq.dev/projects/kombu/en/latest/introduction.html
"""
def __init__(
self, uri=None, queue="logging", level=NOTSET, filter=None, bubble=False
):
Handler.__init__(self, level, filter, bubble)
try:
import kombu
except ImportError:
raise RuntimeError(
"The kombu library is required for the RabbitMQSubscriber."
)
if uri:
connection = kombu.Connection(uri)
self.queue = connection.SimpleQueue(queue)
def export_record(self, record):
"""Exports the record into a dictionary ready for JSON dumping."""
return record.to_dict(json_safe=True)
def emit(self, record):
self.queue.put(self.export_record(record))
def close(self):
self.queue.close()
RabbitMQHandler = MessageQueueHandler
| MessageQueueHandler |
python | ray-project__ray | doc/source/tune/doc_code/fault_tolerance.py | {
"start": 2685,
"end": 4332
} | class ____:
def __init__(self, model_id):
self.model_id = model_id
# Load weights based on the `model_id`...
def train_fn(config):
# Retrieve the model from the object store.
model = ray.get(config["model_ref"])
print(model.model_id)
# These models may be large, so `ray.put` them in the Ray Object Store
# to share the models between trials.
model_refs = [ray.put(LargeModel(1)), ray.put(LargeModel(2))]
tuner = tune.Tuner(
train_fn,
# Tune over the object references!
param_space={"model_ref": tune.grid_search(model_refs)},
run_config=tune.RunConfig(
storage_path=os.path.expanduser("~/ray_results"), name="restore_object_refs"
),
)
tuner.fit()
# __ft_restore_objrefs_initial_end__
if ray.is_initialized():
ray.shutdown()
# __ft_restore_objrefs_restored_start__
# Re-create the objects and put them in the object store.
param_space = {
"model_ref": tune.grid_search([ray.put(LargeModel(1)), ray.put(LargeModel(2))])
}
tuner = tune.Tuner.restore(
os.path.expanduser("~/ray_results/restore_object_refs"),
trainable=train_fn,
# Re-specify the `param_space` to update the object references.
param_space=param_space,
resume_errored=True,
)
tuner.fit()
# __ft_restore_objrefs_restored_end__
# __ft_trial_failure_start__
from ray import tune
tuner = tune.Tuner(
trainable,
param_space={"num_epochs": 10},
run_config=tune.RunConfig(
storage_path=os.path.expanduser("~/ray_results"),
name="trial_fault_tolerance",
failure_config=tune.FailureConfig(max_failures=3),
),
)
tuner.fit()
# __ft_trial_failure_end__
| LargeModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py | {
"start": 5545,
"end": 5808
} | class ____:
__slots__ = (
"foo"
# strange comment 1
,
# comment about bar
"bar"
# strange comment 2
,
)
__slots__ = {"foo", "bar",
"baz", "bingo"
}
| BezierBuilder4 |
python | Textualize__textual | examples/breakpoints.py | {
"start": 333,
"end": 1277
} | class ____(App):
# A breakpoint consists of a width and a class name to set
HORIZONTAL_BREAKPOINTS = [
(0, "-narrow"),
(40, "-normal"),
(80, "-wide"),
(120, "-very-wide"),
]
CSS = """
Screen {
Placeholder { padding: 2; }
Grid { grid-rows: auto; height: auto; }
# Change the styles according to the breakpoint classes
&.-narrow {
Grid { grid-size: 1; }
}
&.-normal {
Grid { grid-size: 2; }
}
&.-wide {
Grid { grid-size: 4; }
}
&.-very-wide {
Grid { grid-size: 6; }
}
}
"""
def compose(self) -> ComposeResult:
yield Markdown(HELP)
with Grid():
for n in range(16):
yield Placeholder(f"Placeholder {n+1}")
yield Footer()
if __name__ == "__main__":
BreakpointApp().run()
| BreakpointApp |
python | numba__numba | numba/core/caching.py | {
"start": 1617,
"end": 1930
} | class ____(_Cache):
@property
def cache_path(self):
return None
def load_overload(self, sig, target_context):
pass
def save_overload(self, sig, cres):
pass
def enable(self):
pass
def disable(self):
pass
def flush(self):
pass
| NullCache |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-gigachat/llama_index/embeddings/gigachat/base.py | {
"start": 426,
"end": 4778
} | class ____(BaseEmbedding):
"""
GigaChat encoder class for generating embeddings.
Attributes:
_client (Optional[GigaChat]): Instance of the GigaChat client.
type (str): Type identifier for the encoder, which is "gigachat".
Example:
.. code-block:: python
from llama_index.embeddings.gigachat import GigaChatEmbeddings
embeddings = GigaChatEmbeddings(
credentials=..., scope=..., verify_ssl_certs=False
)
"""
_client: Optional[GigaChat] = PrivateAttr()
type: str = "gigachat"
def __init__(
self,
name: Optional[str] = "Embeddings",
auth_data: Optional[str] = None,
scope: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
auth_data = get_from_param_or_env(
"auth_data", auth_data, "GIGACHAT_AUTH_DATA", ""
)
if not auth_data:
raise ValueError(
"You must provide an AUTH DATA to use GigaChat. "
"You can either pass it in as an argument or set it `GIGACHAT_AUTH_DATA`."
)
if scope is None:
raise ValueError(
"""
GigaChat scope cannot be 'None'.
Set 'GIGACHAT_API_PERS' for personal use or 'GIGACHAT_API_CORP' for corporate use.
"""
)
super().__init__(
model_name=name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
try:
self._client = GigaChat(
scope=scope, credentials=auth_data, verify_ssl_certs=False
)
except Exception as e:
raise ValueError(f"GigaChat client failed to initialize. Error: {e}") from e
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "GigaChatEmbedding"
def _get_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""
Synchronously Embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = self._client.embeddings(queries).data
return [embeds_obj.embedding for embeds_obj in embeddings]
async def _aget_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""
Asynchronously embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = (await self._client.aembeddings(queries)).data
return [embeds_obj.embedding for embeds_obj in embeddings]
def _get_query_embedding(self, query: List[str]) -> List[float]:
"""
Synchronously embed a document using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return self._client.embeddings(query).data[0].embedding
async def _aget_query_embedding(self, query: List[str]) -> List[float]:
"""
Asynchronously embed a query using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return (await self._client.aembeddings(query)).data[0].embedding
def _get_text_embedding(self, text: str) -> List[float]:
"""
Synchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._client.embeddings([text]).data[0].embedding
async def _aget_text_embedding(self, text: str) -> List[float]:
"""
Asynchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return (await self._client.aembeddings([text])).data[0].embedding
| GigaChatEmbedding |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/chat_models/test_base.py | {
"start": 42102,
"end": 44292
} | class ____(Exception):
"""Mock API error with response attribute."""
def __init__(self, message: str, response: MockResponse | None = None):
super().__init__(message)
self.message = message
if response is not None:
self.response = response
def test_generate_response_from_error_with_valid_json() -> None:
"""Test `_generate_response_from_error` with valid JSON response."""
response = MockResponse(
status_code=400,
headers={"content-type": "application/json"},
json_data={"error": {"message": "Bad request", "type": "invalid_request"}},
)
error = MockAPIError("API Error", response=response)
generations = _generate_response_from_error(error)
assert len(generations) == 1
generation = generations[0]
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.message, AIMessage)
assert generation.message.content == ""
metadata = generation.message.response_metadata
assert metadata["body"] == {
"error": {"message": "Bad request", "type": "invalid_request"}
}
assert metadata["headers"] == {"content-type": "application/json"}
assert metadata["status_code"] == 400
def test_generate_response_from_error_handles_streaming_response_failure() -> None:
# Simulates scenario where accessing response.json() or response.text
# raises ResponseNotRead on streaming responses
response = MockResponse(
status_code=400,
headers={"content-type": "application/json"},
json_raises=Exception, # Simulates ResponseNotRead or similar
text_raises=Exception,
)
error = MockAPIError("API Error", response=response)
# This should NOT raise an exception, but should handle it gracefully
generations = _generate_response_from_error(error)
assert len(generations) == 1
generation = generations[0]
metadata = generation.message.response_metadata
# When both fail, body should be None instead of raising an exception
assert metadata["body"] is None
assert metadata["headers"] == {"content-type": "application/json"}
assert metadata["status_code"] == 400
| MockAPIError |
python | spyder-ide__spyder | spyder/plugins/projects/widgets/qcookiecutter.py | {
"start": 546,
"end": 772
} | class ____:
"""
Namespace to provide a holder for attributes when rendering a template.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
| Namespace |
python | getsentry__sentry | src/sentry/snuba/metrics/query_builder.py | {
"start": 50182,
"end": 50245
} | class ____(_SeriesTotals):
by: dict[str, Any]
| _BySeriesTotals |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/utils.py | {
"start": 3597,
"end": 4051
} | class ____(AirbyteTracedException):
"""Raises the error when `Shopify Store` name is incorrect or couldn't be verified by the Shopify"""
def __init__(self, url, **kwargs) -> None:
self.message = f"The `Shopify Store` name is invalid or missing for `input configuration`, make sure it's valid. Details: {url}"
super().__init__(internal_message=self.message, failure_type=FailureType.config_error, **kwargs)
| ShopifyWrongShopNameError |
python | zarr-developers__zarr-python | src/zarr/errors.py | {
"start": 1169,
"end": 1344
} | class ____(NodeNotFoundError):
"""
Raised when an array isn't found at a certain path.
"""
_msg = "No array found in store {!r} at path {!r}"
| ArrayNotFoundError |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/basetypes.py | {
"start": 15594,
"end": 15838
} | class ____(QtCore.QObject):
"""
WidgetParameterItem is not a QObject, so create a QObject wrapper that items can use for emitting
"""
sigChanging = QtCore.Signal(object, object)
sigChanged = QtCore.Signal(object, object)
| Emitter |
python | sanic-org__sanic | sanic/handlers/error.py | {
"start": 389,
"end": 7290
} | class ____:
"""Process and handle all uncaught exceptions.
This error handling framework is built into the core that can be extended
by the developers to perform a wide range of tasks from recording the error
stats to reporting them to an external service that can be used for
realtime alerting system.
Args:
base (BaseRenderer): The renderer to use for the error pages.
""" # noqa: E501
def __init__(
self,
base: type[BaseRenderer] = TextRenderer,
):
self.cached_handlers: dict[
tuple[type[BaseException], Optional[str]], Optional[RouteHandler]
] = {}
self.debug = False
self.base = base
def _full_lookup(self, exception, route_name: Optional[str] = None):
return self.lookup(exception, route_name)
def _add(
self,
key: tuple[type[BaseException], Optional[str]],
handler: RouteHandler,
) -> None:
if key in self.cached_handlers:
exc, name = key
if name is None:
name = "__ALL_ROUTES__"
message = (
f"Duplicate exception handler definition on: route={name} "
f"and exception={exc}"
)
raise ServerError(message)
self.cached_handlers[key] = handler
def add(self, exception, handler, route_names: Optional[list[str]] = None):
"""Add a new exception handler to an already existing handler object.
Args:
exception (sanic.exceptions.SanicException or Exception): Type
of exception that needs to be handled.
handler (function): Reference to the function that will
handle the exception.
Returns:
None
""" # noqa: E501
if route_names:
for route in route_names:
self._add((exception, route), handler)
else:
self._add((exception, None), handler)
def lookup(self, exception, route_name: Optional[str] = None):
"""Lookup the existing instance of `ErrorHandler` and fetch the registered handler for a specific type of exception.
This method leverages a dict lookup to speedup the retrieval process.
Args:
exception (sanic.exceptions.SanicException or Exception): Type
of exception.
Returns:
Registered function if found, ``None`` otherwise.
""" # noqa: E501
exception_class = type(exception)
for name in (route_name, None):
exception_key = (exception_class, name)
handler = self.cached_handlers.get(exception_key)
if handler:
return handler
for name in (route_name, None):
for ancestor in type.mro(exception_class):
exception_key = (ancestor, name)
if exception_key in self.cached_handlers:
handler = self.cached_handlers[exception_key]
self.cached_handlers[(exception_class, route_name)] = (
handler
)
return handler
if ancestor is BaseException:
break
self.cached_handlers[(exception_class, route_name)] = None
handler = None
return handler
_lookup = _full_lookup
def response(self, request, exception):
"""Fetch and executes an exception handler and returns a response object.
Args:
request (sanic.request.Request): Instance of the request.
exception (sanic.exceptions.SanicException or Exception): Exception to handle.
Returns:
Wrap the return value obtained from the `default` function or the registered handler for that type of exception.
""" # noqa: E501
route_name = request.name if request else None
handler = self._lookup(exception, route_name)
response = None
try:
if handler:
response = handler(request, exception)
if response is None:
response = self.default(request, exception)
except Exception:
try:
url = repr(request.url)
except AttributeError: # no cov
url = "unknown"
response_message = (
'Exception raised in exception handler "%s" for uri: %s'
)
error_logger.exception(response_message, handler.__name__, url)
if self.debug:
return text(response_message % (handler.__name__, url), 500)
else:
return text("An error occurred while handling an error", 500)
return response
def default(self, request: Request, exception: Exception) -> HTTPResponse:
"""Provide a default behavior for the objects of ErrorHandler.
If a developer chooses to extend the ErrorHandler, they can
provide a custom implementation for this method to behave in a way
they see fit.
Args:
request (sanic.request.Request): Incoming request.
exception (sanic.exceptions.SanicException or Exception): Exception object.
Returns:
HTTPResponse: The response object.
Examples:
```python
class CustomErrorHandler(ErrorHandler):
def default(self, request: Request, exception: Exception) -> HTTPResponse:
# Custom logic for handling the exception and creating a response
custom_response = my_custom_logic(request, exception)
return custom_response
app = Sanic("MyApp", error_handler=CustomErrorHandler())
```
""" # noqa: E501
self.log(request, exception)
fallback = request.app.config.FALLBACK_ERROR_FORMAT
return exception_response(
request,
exception,
debug=self.debug,
base=self.base,
fallback=fallback,
)
@staticmethod
def log(request: Request, exception: Exception) -> None:
"""Logs information about an incoming request and the associated exception.
Args:
request (Request): The incoming request to be logged.
exception (Exception): The exception that occurred during the handling of the request.
Returns:
None
""" # noqa: E501
quiet = getattr(exception, "quiet", False)
noisy = getattr(request.app.config, "NOISY_EXCEPTIONS", False)
if quiet is False or noisy is True:
try:
url = repr(request.url)
except AttributeError: # no cov
url = "unknown"
error_logger.exception(
"Exception occurred while handling uri: %s", url
)
| ErrorHandler |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_serverless_functions.py | {
"start": 3716,
"end": 8788
} | class ____(AbstractServerlessTest):
method = "get"
@patch("sentry.integrations.aws_lambda.integration.get_supported_functions")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_basic(
self, mock_gen_aws_client: MagicMock, mock_get_supported_functions: MagicMock
) -> None:
mock_get_supported_functions.return_value = [
{
"FunctionName": "lambdaA",
"Runtime": "nodejs12.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaA",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-layer:3"},
],
},
{
"FunctionName": "lambdaD",
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaD",
"Layers": [{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"}],
},
{
"FunctionName": "lambdaB",
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaB",
"Layers": [{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-layer:2"}],
},
]
assert self.get_response().data == [
{
"name": "lambdaA",
"runtime": "nodejs12.x",
"version": 3,
"outOfDate": False,
"enabled": True,
},
{
"name": "lambdaB",
"runtime": "nodejs10.x",
"version": 2,
"outOfDate": True,
"enabled": True,
},
{
"name": "lambdaD",
"runtime": "nodejs10.x",
"version": -1,
"outOfDate": False,
"enabled": False,
},
]
@patch("sentry.integrations.aws_lambda.integration.get_supported_functions")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_basic_python_functions(
self, mock_gen_aws_client: MagicMock, mock_get_supported_functions: MagicMock
) -> None:
mock_get_supported_functions.return_value = [
{
"FunctionName": "lambdaA",
"Runtime": "python3.6",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaA",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34"},
],
"Environment": {"Variables": {"SENTRY_INITIAL_HANDLER": "handler_string"}},
},
{
"FunctionName": "lambdaD",
"Runtime": "python3.8",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaD",
"Layers": [{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"}],
},
{
"FunctionName": "lambdaB",
"Runtime": "python3.8",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaB",
"Layers": [{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34"}],
},
{
"FunctionName": "lambdaC",
"Runtime": "python3.6",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaC",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-python-layer:22"},
],
"Environment": {"Variables": {"SENTRY_INITIAL_HANDLER": "handler_string"}},
},
]
assert self.get_response().data == [
{
"name": "lambdaA",
"runtime": "python3.6",
"version": 34,
"outOfDate": False,
"enabled": True,
},
{
"name": "lambdaB",
"runtime": "python3.8",
"version": -1,
"outOfDate": False,
"enabled": False,
},
{
"name": "lambdaC",
"runtime": "python3.6",
"version": 22,
"outOfDate": True,
"enabled": True,
},
{
"name": "lambdaD",
"runtime": "python3.8",
"version": -1,
"outOfDate": False,
"enabled": False,
},
]
@override_settings(
SENTRY_SUBNET_SECRET="hush-hush-im-invisible",
SENTRY_CONTROL_ADDRESS="http://controlserver",
)
| OrganizationIntegrationServerlessFunctionsGetTest |
python | neetcode-gh__leetcode | python/0215-kth-largest-element-in-an-array.py | {
"start": 492,
"end": 707
} | class ____:
def findKthLargest(self, nums: List[int], k: int) -> int:
nums.sort()
return nums[len(nums) - k]
# Solution: QuickSelect
# Time Complexity: O(n)
# Extra Space Complexity: O(n)
| Solution1 |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 42888,
"end": 43462
} | class ____(TestCase):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
g1 = randint(0, 5, size=15)
g2 = randint(0, 8, size=15)
V[g1, g2] = -V[g1, g2]
assert_(
(np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()
)
def test_boolean_edgecase(self):
a = np.array([], dtype="int32")
b = np.array([], dtype="bool")
c = a[b]
assert_equal(c, [])
assert_equal(c.dtype, np.dtype("int32"))
@xpassIfTorchDynamo_np # (reason="TODO")
| TestIndex |
python | huggingface__transformers | src/transformers/models/gemma2/modular_gemma2.py | {
"start": 16603,
"end": 18745
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Gemma2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.config = config
self.attention_type = config.layer_types[layer_idx]
self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
self.mlp = Gemma2MLP(config)
self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| Gemma2DecoderLayer |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 2877,
"end": 3080
} | class ____(WidgetRenderer):
tag = "input"
def create_root(self, context):
root = super().create_root(context)
root.attrib["type"] = "hidden"
return root
| HiddenInputRenderer |
python | pytorch__pytorch | torch/nn/modules/instancenorm.py | {
"start": 9528,
"end": 13169
} | class ____(_InstanceNorm):
r"""Applies Instance Normalization.
This operation applies Instance Normalization
over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
`torch.var(input, correction=0)`.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm2d` is applied
on each channel of channeled data like RGB images, but
:class:`LayerNorm` is usually applied on entire sample and often in NLP
tasks. Additionally, :class:`LayerNorm` applies elementwise affine
transform, while :class:`InstanceNorm2d` usually don't apply affine
transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)` or :math:`(C, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm2d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm2d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
"""
def _get_no_batch_dim(self) -> int:
return 3
def _check_input_dim(self, input) -> None:
if input.dim() not in (3, 4):
raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")
| InstanceNorm2d |
python | tox-dev__tox | src/tox/config/set_env.py | {
"start": 283,
"end": 6160
} | class ____:
def __init__( # noqa: C901, PLR0912
self, raw: str | dict[str, str] | list[dict[str, str]], name: str, env_name: str | None, root: Path
) -> None:
self.changed = False
self._materialized: dict[str, str] = {} # env vars we already loaded
self._raw: dict[str, str] = {} # could still need replacement
self._needs_replacement: list[str] = [] # env vars that need replacement
self._env_files: list[str] = []
self._replacer: Replacer = lambda s, c: s # noqa: ARG005
self._name, self._env_name, self._root = name, env_name, root
from .loader.replacer import MatchExpression, find_replace_expr # noqa: PLC0415
if isinstance(raw, dict):
self._raw = raw
if "file" in raw: # environment files to be handled later
self._env_files.append(raw["file"])
self._raw.pop("file")
return
if isinstance(raw, list):
self._raw = reduce(lambda a, b: {**a, **b}, raw)
return
for line in raw.splitlines(): # noqa: PLR1702
if line.strip():
if self._is_file_line(line):
self._env_files.append(self._parse_file_line(line))
else:
try:
key, value = self._extract_key_value(line)
if "{" in key:
msg = f"invalid line {line!r} in set_env"
raise ValueError(msg) # noqa: TRY301
except ValueError:
for expr in find_replace_expr(line):
if isinstance(expr, MatchExpression):
self._needs_replacement.append(line)
break
else:
raise
else:
self._raw[key] = value
@staticmethod
def _is_file_line(line: str) -> bool:
return line.startswith("file|")
@staticmethod
def _parse_file_line(line: str) -> str:
return line[len("file|") :]
def use_replacer(self, value: Replacer, args: ConfigLoadArgs) -> None:
self._replacer = value
for filename in self._env_files:
self._raw.update(self._stream_env_file(filename, args))
def _stream_env_file(self, filename: str, args: ConfigLoadArgs) -> Iterator[tuple[str, str]]:
# Our rules in the documentation, some upstream environment file rules (we follow mostly the docker one):
# - https://www.npmjs.com/package/dotenv#rules
# - https://docs.docker.com/compose/env-file/
env_file = Path(self._replacer(filename, args.copy())) # apply any replace options
env_file = env_file if env_file.is_absolute() else self._root / env_file
if not env_file.exists():
msg = f"{env_file} does not exist for set_env"
raise Fail(msg)
for env_line in env_file.read_text().splitlines():
env_line = env_line.strip() # noqa: PLW2901
if not env_line or env_line.startswith("#"):
continue
yield self._extract_key_value(env_line)
@staticmethod
def _extract_key_value(line: str) -> tuple[str, str]:
key, sep, value = line.partition("=")
if sep:
return key.strip(), value.strip()
msg = f"invalid line {line!r} in set_env"
raise ValueError(msg)
def load(self, item: str, args: ConfigLoadArgs | None = None) -> str:
if item in self._materialized:
return self._materialized[item]
raw = self._raw[item]
args = ConfigLoadArgs([], self._name, self._env_name) if args is None else args
args.chain.append(f"env:{item}")
result = self._replacer(raw, args) # apply any replace options
result = result.replace(r"\#", "#") # unroll escaped comment with replacement
self._materialized[item] = result
self._raw.pop(item, None) # if the replace requires the env we may be called again, so allow pop to fail
return result
def __contains__(self, item: object) -> bool:
return isinstance(item, str) and item in iter(self)
def __iter__(self) -> Iterator[str]:
# start with the materialized ones, maybe we don't need to materialize the raw ones
yield from self._materialized.keys()
yield from list(self._raw.keys()) # iterating over this may trigger materialization and change the dict
args = ConfigLoadArgs([], self._name, self._env_name)
while self._needs_replacement:
line = self._needs_replacement.pop(0)
expanded_line = self._replacer(line, args)
sub_raw: dict[str, str] = {}
for sub_line in filter(None, expanded_line.splitlines()):
if not self._is_file_line(sub_line):
sub_raw.__setitem__(*self._extract_key_value(sub_line))
else:
for key, value in self._stream_env_file(self._parse_file_line(sub_line), args):
if key not in self._raw:
sub_raw[key] = value # noqa: PERF403
self._raw.update(sub_raw)
self.changed = True # loading while iterating can cause these values to be missed
yield from sub_raw.keys()
def update(self, param: Mapping[str, str] | SetEnv, *, override: bool = True) -> None:
for key in param:
# do not override something already set explicitly
if override or (key not in self._raw and key not in self._materialized):
value = param.load(key) if isinstance(param, SetEnv) else param[key]
self._materialized[key] = value
self.changed = True
__all__ = ("SetEnv",)
| SetEnv |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 213707,
"end": 214169
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteIssueComment"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The ID of the comment to delete."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteIssueCommentInput |
python | coleifer__peewee | tests/model_save.py | {
"start": 188,
"end": 309
} | class ____(TestModel):
pk = IntegerField(constraints=[SQL('DEFAULT 3')], primary_key=True)
value = IntegerField()
| T2 |
python | getsentry__sentry | src/sentry/auth_v2/apps.py | {
"start": 36,
"end": 132
} | class ____(AppConfig):
name = "sentry.auth_v2"
def ready(self) -> None:
pass
| Config |
python | tiangolo__fastapi | tests/test_security_api_key_header_description.py | {
"start": 250,
"end": 2112
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: str = Security(api_key)):
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
return current_user
client = TestClient(app)
def test_security_api_key():
response = client.get("/users/me", headers={"key": "secret"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "secret"}
def test_security_api_key_no_key():
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "APIKey"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyHeader": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyHeader": {
"type": "apiKey",
"name": "key",
"in": "header",
"description": "An API Key Header",
}
}
},
}
| User |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 10302,
"end": 10401
} | class ____:
def m0(self, a):
return self.m1(a)
def m1(self, a):
return a
| A23 |
python | coleifer__peewee | tests/base_models.py | {
"start": 1402,
"end": 1442
} | class ____(TestModel):
a = TextField()
| A |
python | Pylons__pyramid | src/pyramid/config/views.py | {
"start": 7493,
"end": 85032
} | class ____:
@viewdefaults
@action_method
def add_view(
self,
view=None,
name="",
for_=None,
permission=None,
request_type=None,
route_name=None,
request_method=None,
request_param=None,
containment=None,
attr=None,
renderer=None,
wrapper=None,
xhr=None,
accept=None,
header=None,
path_info=None,
custom_predicates=(),
context=None,
decorator=None,
mapper=None,
http_cache=None,
match_param=None,
require_csrf=None,
exception_only=False,
**view_options,
):
"""Add a :term:`view configuration` to the current
configuration state. Arguments to ``add_view`` are broken
down below into *predicate* arguments and *non-predicate*
arguments. Predicate arguments narrow the circumstances in
which the view callable will be invoked when a request is
presented to :app:`Pyramid`; non-predicate arguments are
informational.
Non-Predicate Arguments
view
A :term:`view callable` or a :term:`dotted Python name`
which refers to a view callable. This argument is required
unless a ``renderer`` argument also exists. If a
``renderer`` argument is passed, and a ``view`` argument is
not provided, the view callable defaults to a callable that
returns an empty dictionary (see
:ref:`views_which_use_a_renderer`).
permission
A :term:`permission` that the user must possess in order to invoke
the :term:`view callable`. See :ref:`view_security_section` for
more information about view security and permissions. This is
often a string like ``view`` or ``edit``.
If ``permission`` is omitted, a *default* permission may be used
for this view registration if one was named as the
:class:`pyramid.config.Configurator` constructor's
``default_permission`` argument, or if
:meth:`pyramid.config.Configurator.set_default_permission` was used
prior to this view registration. Pass the value
:data:`pyramid.security.NO_PERMISSION_REQUIRED` as the permission
argument to explicitly indicate that the view should always be
executable by entirely anonymous users, regardless of the default
permission, bypassing any :term:`authorization policy` that may be
in effect.
attr
This knob is most useful when the view definition is a class.
The view machinery defaults to using the ``__call__`` method
of the :term:`view callable` (or the function itself, if the
view callable is a function) to obtain a response. The
``attr`` value allows you to vary the method attribute used
to obtain the response. For example, if your view was a
class, and the class has a method named ``index`` and you
wanted to use this method instead of the class' ``__call__``
method to return the response, you'd say ``attr="index"`` in the
view configuration for the view.
renderer
This is either a single string term (e.g. ``json``) or a
string implying a path or :term:`asset specification`
(e.g. ``templates/views.pt``) naming a :term:`renderer`
implementation. If the ``renderer`` value does not contain
a dot ``.``, the specified string will be used to look up a
renderer implementation, and that renderer implementation
will be used to construct a response from the view return
value. If the ``renderer`` value contains a dot (``.``),
the specified term will be treated as a path, and the
filename extension of the last element in the path will be
used to look up the renderer implementation, which will be
passed the full path. The renderer implementation will be
used to construct a :term:`response` from the view return
value.
Note that if the view itself returns a :term:`response` (see
:ref:`the_response`), the specified renderer implementation
is never called.
When the renderer is a path, although a path is usually just
a simple relative pathname (e.g. ``templates/foo.pt``,
implying that a template named "foo.pt" is in the
"templates" directory relative to the directory of the
current :term:`package` of the Configurator), a path can be
absolute, starting with a slash on UNIX or a drive letter
prefix on Windows. The path can alternately be a
:term:`asset specification` in the form
``some.dotted.package_name:relative/path``, making it
possible to address template assets which live in a
separate package.
The ``renderer`` attribute is optional. If it is not
defined, the "null" renderer is assumed (no rendering is
performed and the value is passed back to the upstream
:app:`Pyramid` machinery unmodified).
http_cache
.. versionadded:: 1.1
When you supply an ``http_cache`` value to a view configuration,
the ``Expires`` and ``Cache-Control`` headers of a response
generated by the associated view callable are modified. The value
for ``http_cache`` may be one of the following:
- A nonzero integer. If it's a nonzero integer, it's treated as a
number of seconds. This number of seconds will be used to
compute the ``Expires`` header and the ``Cache-Control:
max-age`` parameter of responses to requests which call this view.
For example: ``http_cache=3600`` instructs the requesting browser
to 'cache this response for an hour, please'.
- A ``datetime.timedelta`` instance. If it's a
``datetime.timedelta`` instance, it will be converted into a
number of seconds, and that number of seconds will be used to
compute the ``Expires`` header and the ``Cache-Control:
max-age`` parameter of responses to requests which call this view.
For example: ``http_cache=datetime.timedelta(days=1)`` instructs
the requesting browser to 'cache this response for a day, please'.
- Zero (``0``). If the value is zero, the ``Cache-Control`` and
``Expires`` headers present in all responses from this view will
be composed such that client browser cache (and any intermediate
caches) are instructed to never cache the response.
- A two-tuple. If it's a two tuple (e.g. ``http_cache=(1,
{'public':True})``), the first value in the tuple may be a
nonzero integer or a ``datetime.timedelta`` instance; in either
case this value will be used as the number of seconds to cache
the response. The second value in the tuple must be a
dictionary. The values present in the dictionary will be used as
input to the ``Cache-Control`` response header. For example:
``http_cache=(3600, {'public':True})`` means 'cache for an hour,
and add ``public`` to the Cache-Control header of the response'.
All keys and values supported by the
``webob.cachecontrol.CacheControl`` interface may be added to the
dictionary. Supplying ``{'public':True}`` is equivalent to
calling ``response.cache_control.public = True``.
Providing a non-tuple value as ``http_cache`` is equivalent to
calling ``response.cache_expires(value)`` within your view's body.
Providing a two-tuple value as ``http_cache`` is equivalent to
calling ``response.cache_expires(value[0], **value[1])`` within your
view's body.
If you wish to avoid influencing, the ``Expires`` header, and
instead wish to only influence ``Cache-Control`` headers, pass a
tuple as ``http_cache`` with the first element of ``None``, e.g.:
``(None, {'public':True})``.
If you wish to prevent a view that uses ``http_cache`` in its
configuration from having its caching response headers changed by
this machinery, set ``response.cache_control.prevent_auto = True``
before returning the response from the view. This effectively
disables any HTTP caching done by ``http_cache`` for that response.
require_csrf
.. versionadded:: 1.7
A boolean option or ``None``. Default: ``None``.
If this option is set to ``True`` then CSRF checks will be enabled
for requests to this view. The required token or header default to
``csrf_token`` and ``X-CSRF-Token``, respectively.
CSRF checks only affect "unsafe" methods as defined by RFC2616. By
default, these methods are anything except
``GET``, ``HEAD``, ``OPTIONS``, and ``TRACE``.
The defaults here may be overridden by
:meth:`pyramid.config.Configurator.set_default_csrf_options`.
This feature requires a configured :term:`session factory`.
If this option is set to ``False`` then CSRF checks will be disabled
regardless of the default ``require_csrf`` setting passed
to ``set_default_csrf_options``.
See :ref:`auto_csrf_checking` for more information.
wrapper
The :term:`view name` of a different :term:`view
configuration` which will receive the response body of this
view as the ``request.wrapped_body`` attribute of its own
:term:`request`, and the :term:`response` returned by this
view as the ``request.wrapped_response`` attribute of its
own request. Using a wrapper makes it possible to "chain"
views together to form a composite response. The response
of the outermost wrapper view will be returned to the user.
The wrapper view will be found as any view is found: see
:ref:`view_lookup`. The "best" wrapper view will be found
based on the lookup ordering: "under the hood" this wrapper
view is looked up via
``pyramid.view.render_view_to_response(context, request,
'wrapper_viewname')``. The context and request of a wrapper
view is the same context and request of the inner view. If
this attribute is unspecified, no view wrapping is done.
decorator
A :term:`dotted Python name` to function (or the function itself,
or an iterable of the aforementioned) which will be used to
decorate the registered :term:`view callable`. The decorator
function(s) will be called with the view callable as a single
argument. The view callable it is passed will accept
``(context, request)``. The decorator(s) must return a
replacement view callable which also accepts ``(context,
request)``.
If decorator is an iterable, the callables will be combined and
used in the order provided as a decorator.
For example::
@view_config(...,
decorator=(decorator2,
decorator1))
def myview(request):
....
Is similar to doing::
@view_config(...)
@decorator2
@decorator1
def myview(request):
...
Except with the existing benefits of ``decorator=`` (having a common
decorator syntax for all view calling conventions and not having to
think about preserving function attributes such as ``__name__`` and
``__module__`` within decorator logic).
An important distinction is that each decorator will receive a
response object implementing :class:`pyramid.interfaces.IResponse`
instead of the raw value returned from the view callable. All
decorators in the chain must return a response object or raise an
exception:
.. code-block:: python
def log_timer(wrapped):
def wrapper(context, request):
start = time.time()
response = wrapped(context, request)
duration = time.time() - start
response.headers['X-View-Time'] = '%.3f' % (duration,)
log.info('view took %.3f seconds', duration)
return response
return wrapper
.. versionchanged:: 1.4a4
Passing an iterable.
mapper
A Python object or :term:`dotted Python name` which refers to a
:term:`view mapper`, or ``None``. By default it is ``None``, which
indicates that the view should use the default view mapper. This
plug-point is useful for Pyramid extension developers, but it's not
very useful for 'civilians' who are just developing stock Pyramid
applications. Pay no attention to the man behind the curtain.
accept
A :term:`media type` that will be matched against the ``Accept``
HTTP request header. If this value is specified, it must be a
specific media type such as ``text/html`` or ``text/html;level=1``.
If the media type is acceptable by the ``Accept`` header of the
request, or if the ``Accept`` header isn't set at all in the request,
this predicate will match. If this does not match the ``Accept``
header of the request, view matching continues.
If ``accept`` is not specified, the ``HTTP_ACCEPT`` HTTP header is
not taken into consideration when deciding whether or not to invoke
the associated view callable.
The ``accept`` argument is technically not a predicate and does
not support wrapping with :func:`pyramid.config.not_`.
See :ref:`accept_content_negotiation` for more information.
.. versionchanged:: 1.10
Specifying a media range is deprecated and will be removed in
:app:`Pyramid` 2.0. Use explicit media types to avoid any
ambiguities in content negotiation.
.. versionchanged:: 2.0
Removed support for media ranges.
exception_only
.. versionadded:: 1.8
When this value is ``True``, the ``context`` argument must be
a subclass of ``Exception``. This flag indicates that only an
:term:`exception view` should be created, and that this view should
not match if the traversal :term:`context` matches the ``context``
argument. If the ``context`` is a subclass of ``Exception`` and
this value is ``False`` (the default), then a view will be
registered to match the traversal :term:`context` as well.
Predicate Arguments
name
The :term:`view name`. Read :ref:`traversal_chapter` to
understand the concept of a view name.
context
An object or a :term:`dotted Python name` referring to an
interface or class object that the :term:`context` must be
an instance of, *or* the :term:`interface` that the
:term:`context` must provide in order for this view to be
found and called. This predicate is true when the
:term:`context` is an instance of the represented class or
if the :term:`context` provides the represented interface;
it is otherwise false. This argument may also be provided
to ``add_view`` as ``for_`` (an older, still-supported
spelling). If the view should *only* match when handling
exceptions, then set the ``exception_only`` to ``True``.
route_name
This value must match the ``name`` of a :term:`route
configuration` declaration (see :ref:`urldispatch_chapter`)
that must match before this view will be called.
request_type
This value should be an :term:`interface` that the
:term:`request` must provide in order for this view to be
found and called. This value exists only for backwards
compatibility purposes.
request_method
This value can be either a string (such as ``"GET"``, ``"POST"``,
``"PUT"``, ``"DELETE"``, ``"HEAD"`` or ``"OPTIONS"``) representing
an HTTP ``REQUEST_METHOD``, or a tuple containing one or more of
these strings. A view declaration with this argument ensures that
the view will only be called when the ``method`` attribute of the
request (aka the ``REQUEST_METHOD`` of the WSGI environment) matches
a supplied value. Note that use of ``GET`` also implies that the
view will respond to ``HEAD`` as of Pyramid 1.4.
.. versionchanged:: 1.2
The ability to pass a tuple of items as ``request_method``.
Previous versions allowed only a string.
request_param
This value can be any string or any sequence of strings. A view
declaration with this argument ensures that the view will only be
called when the :term:`request` has a key in the ``request.params``
dictionary (an HTTP ``GET`` or ``POST`` variable) that has a
name which matches the supplied value (if the value is a string)
or values (if the value is a tuple). If any value
supplied has a ``=`` sign in it,
e.g. ``request_param="foo=123"``, then the key (``foo``)
must both exist in the ``request.params`` dictionary, *and*
the value must match the right hand side of the expression
(``123``) for the view to "match" the current request.
match_param
.. versionadded:: 1.2
This value can be a string of the format "key=value" or a tuple
containing one or more of these strings.
A view declaration with this argument ensures that the view will
only be called when the :term:`request` has key/value pairs in its
:term:`matchdict` that equal those supplied in the predicate.
e.g. ``match_param="action=edit"`` would require the ``action``
parameter in the :term:`matchdict` match the right hand side of
the expression (``edit``) for the view to "match" the current
request.
If the ``match_param`` is a tuple, every key/value pair must match
for the predicate to pass.
containment
This value should be a Python class or :term:`interface` (or a
:term:`dotted Python name`) that an object in the
:term:`lineage` of the context must provide in order for this view
to be found and called. The nodes in your object graph must be
"location-aware" to use this feature. See
:ref:`location_aware` for more information about
location-awareness.
xhr
This value should be either ``True`` or ``False``. If this
value is specified and is ``True``, the :term:`request`
must possess an ``HTTP_X_REQUESTED_WITH`` (aka
``X-Requested-With``) header that has the value
``XMLHttpRequest`` for this view to be found and called.
This is useful for detecting AJAX requests issued from
jQuery, Prototype and other Javascript libraries.
header
This argument can be a string or an iterable of strings for HTTP
headers. The matching is determined as follow:
- If a string does not contain a ``:`` (colon), it will be
considered to be a header name (example ``If-Modified-Since``).
In this case, the header specified by the name must be present
in the request for this string to match. Case is not significant.
- If a string contains a colon, it will be considered a
name/value pair (for example ``User-Agent:Mozilla/.*`` or
``Host:localhost``), where the value part is a regular
expression. The header specified by the name must be present
in the request *and* the regular expression specified as the
value part must match the value of the request header. Case is
not significant for the header name, but it is for the value.
All strings must be matched for this predicate to return ``True``.
If this predicate returns ``False``, view matching continues.
path_info
This value represents a regular expression pattern that will
be tested against the ``PATH_INFO`` WSGI environment
variable. If the regex matches, this predicate will be
``True``.
physical_path
If specified, this value should be a string or a tuple representing
the :term:`physical path` of the context found via traversal for this
predicate to match as true. For example: ``physical_path='/'`` or
``physical_path='/a/b/c'`` or ``physical_path=('', 'a', 'b', 'c')``.
This is not a path prefix match or a regex, it's a whole-path match.
It's useful when you want to always potentially show a view when some
object is traversed to, but you can't be sure about what kind of
object it will be, so you can't use the ``context`` predicate. The
individual path elements in between slash characters or in tuple
elements should be the Unicode representation of the name of the
resource and should not be encoded in any way.
.. versionadded:: 1.4a3
is_authenticated
This value, if specified, must be either ``True`` or ``False``.
If it is specified and ``True``, only a request from an authenticated
user, as determined by the :term:`security policy` in use, will
satisfy the predicate.
If it is specified and ``False``, only a request from a user who is
not authenticated will satisfy the predicate.
.. versionadded:: 2.0
effective_principals
If specified, this value should be a :term:`principal` identifier or
a sequence of principal identifiers. If the
:attr:`pyramid.request.Request.effective_principals` property
indicates that every principal named in the argument list is present
in the current request, this predicate will return True; otherwise it
will return False. For example:
``effective_principals=pyramid.authorization.Authenticated`` or
``effective_principals=('fred', 'group:admins')``.
.. versionadded:: 1.4a4
.. deprecated:: 2.0
Use ``is_authenticated`` or a custom predicate.
custom_predicates
.. deprecated:: 1.5
This value should be a sequence of references to custom
predicate callables. Each custom predicate callable
should accept two arguments:
``context`` and ``request`` and should return either
``True`` or ``False`` after doing arbitrary evaluation of
the context and/or the request. The ability to register
custom view predicates via
:meth:`pyramid.config.Configurator.add_view_predicate`
obsoletes this argument, but it is kept around for backwards
compatibility.
\\*\\*view_options
Pass extra keyword parameters to use custom predicates
or set a value for a view deriver. See
:meth:`pyramid.config.Configurator.add_view_predicate` and
:meth:`pyramid.config.Configurator.add_view_deriver`. See
:ref:`view_and_route_predicates` for more information about
custom predicates and :ref:`view_derivers` for information
about view derivers.
.. versionadded: 1.4a1
.. versionchanged: 1.7
Support setting view deriver options. Previously, only custom
view predicate values could be supplied.
.. versionchanged:: 2.0
Removed support for the ``check_csrf`` predicate.
"""
if custom_predicates:
warnings.warn(
(
'The "custom_predicates" argument to '
'Configurator.add_view is deprecated as of Pyramid 1.5. '
'Use "config.add_view_predicate" and use the registered '
'view predicate as a predicate argument to add_view '
'instead. See "Adding A Custom View, Route, or '
'Subscriber Predicate" in the "Hooks" chapter of the '
'documentation for more information.'
),
DeprecationWarning,
stacklevel=4,
)
if 'effective_principals' in view_options:
warnings.warn(
(
'The new security policy has deprecated '
'effective_principals. See "Upgrading '
'Authentication/Authorization" in "What\'s New in '
'Pyramid 2.0" of the documentation for more information.'
),
DeprecationWarning,
stacklevel=4,
)
if accept is not None:
if is_nonstr_iter(accept):
raise ConfigurationError(
'A list is not supported in the "accept" view predicate.'
)
accept = normalize_accept_offer(accept)
view = self.maybe_dotted(view)
context = self.maybe_dotted(context)
for_ = self.maybe_dotted(for_)
containment = self.maybe_dotted(containment)
mapper = self.maybe_dotted(mapper)
if is_nonstr_iter(decorator):
decorator = combine_decorators(*map(self.maybe_dotted, decorator))
else:
decorator = self.maybe_dotted(decorator)
if not view:
if renderer:
def view(context, request):
return {}
else:
raise ConfigurationError(
'"view" was not specified and no "renderer" specified'
)
if request_type is not None:
request_type = self.maybe_dotted(request_type)
if not IInterface.providedBy(request_type):
raise ConfigurationError(
'request_type must be an interface, not %s' % request_type
)
if context is None:
context = for_
isexc = isexception(context)
if exception_only and not isexc:
raise ConfigurationError(
'view "context" must be an exception type when '
'"exception_only" is True'
)
r_context = context
if r_context is None:
r_context = Interface
if not IInterface.providedBy(r_context):
r_context = implementedBy(r_context)
if isinstance(renderer, str):
renderer = renderers.RendererHelper(
name=renderer, package=self.package, registry=self.registry
)
introspectables = []
ovals = view_options.copy()
ovals.update(
dict(
xhr=xhr,
request_method=request_method,
path_info=path_info,
request_param=request_param,
header=header,
accept=accept,
containment=containment,
request_type=request_type,
match_param=match_param,
custom=predvalseq(custom_predicates),
)
)
def discrim_func():
# We need to defer the discriminator until we know what the phash
# is. It can't be computed any sooner because thirdparty
# predicates/view derivers may not yet exist when add_view is
# called.
predlist = self.get_predlist('view')
valid_predicates = predlist.names()
pvals = {}
dvals = {}
for k, v in ovals.items():
if k in valid_predicates:
pvals[k] = v
else:
dvals[k] = v
self._check_view_options(**dvals)
order, preds, phash = predlist.make(self, **pvals)
view_intr.update(
{'phash': phash, 'order': order, 'predicates': preds}
)
return ('view', context, name, route_name, phash)
discriminator = Deferred(discrim_func)
if inspect.isclass(view) and attr:
view_desc = 'method {!r} of {}'.format(
attr,
self.object_description(view),
)
else:
view_desc = self.object_description(view)
tmpl_intr = None
view_intr = self.introspectable(
'views', discriminator, view_desc, 'view'
)
view_intr.update(
dict(
name=name,
context=context,
exception_only=exception_only,
containment=containment,
request_param=request_param,
request_methods=request_method,
route_name=route_name,
attr=attr,
xhr=xhr,
accept=accept,
header=header,
path_info=path_info,
match_param=match_param,
http_cache=http_cache,
require_csrf=require_csrf,
callable=view,
mapper=mapper,
decorator=decorator,
)
)
view_intr.update(view_options)
introspectables.append(view_intr)
def register(permission=permission, renderer=renderer):
request_iface = IRequest
if route_name is not None:
request_iface = self.registry.queryUtility(
IRouteRequest, name=route_name
)
if request_iface is None:
# route configuration should have already happened in
# phase 2
raise ConfigurationError(
'No route named %s found for view registration'
% route_name
)
if renderer is None:
# use default renderer if one exists (reg'd in phase 1)
if self.registry.queryUtility(IRendererFactory) is not None:
renderer = renderers.RendererHelper(
name=None, package=self.package, registry=self.registry
)
renderer_type = getattr(renderer, 'type', None)
intrspc = self.introspector
if (
renderer_type is not None
and tmpl_intr is not None
and intrspc is not None
and intrspc.get('renderer factories', renderer_type)
is not None
):
# allow failure of registered template factories to be deferred
# until view execution, like other bad renderer factories; if
# we tried to relate this to an existing renderer factory
# without checking if the factory actually existed, we'd end
# up with a KeyError at startup time, which is inconsistent
# with how other bad renderer registrations behave (they throw
# a ValueError at view execution time)
tmpl_intr.relate('renderer factories', renderer.type)
# make a new view separately for normal and exception paths
if not exception_only:
derived_view = derive_view(False, renderer)
register_view(IViewClassifier, request_iface, derived_view)
if isexc:
derived_exc_view = derive_view(True, renderer)
register_view(
IExceptionViewClassifier, request_iface, derived_exc_view
)
if exception_only:
derived_view = derived_exc_view
# if there are two derived views, combine them into one for
# introspection purposes
if not exception_only and isexc:
derived_view = runtime_exc_view(derived_view, derived_exc_view)
derived_view.__discriminator__ = lambda *arg: discriminator
# __discriminator__ is used by superdynamic systems
# that require it for introspection after manual view lookup;
# see also MultiView.__discriminator__
view_intr['derived_callable'] = derived_view
self.registry._clear_view_lookup_cache()
def derive_view(isexc_only, renderer):
# added by discrim_func above during conflict resolving
preds = view_intr['predicates']
order = view_intr['order']
phash = view_intr['phash']
derived_view = self._derive_view(
view,
route_name=route_name,
permission=permission,
predicates=preds,
attr=attr,
context=context,
exception_only=isexc_only,
renderer=renderer,
wrapper_viewname=wrapper,
viewname=name,
accept=accept,
order=order,
phash=phash,
decorator=decorator,
mapper=mapper,
http_cache=http_cache,
require_csrf=require_csrf,
extra_options=ovals,
)
return derived_view
def register_view(classifier, request_iface, derived_view):
# A multiviews is a set of views which are registered for
# exactly the same context type/request type/name triad. Each
# constituent view in a multiview differs only by the
# predicates which it possesses.
# To find a previously registered view for a context
# type/request type/name triad, we need to use the
# ``registered`` method of the adapter registry rather than
# ``lookup``. ``registered`` ignores interface inheritance
# for the required and provided arguments, returning only a
# view registered previously with the *exact* triad we pass
# in.
# We need to do this three times, because we use three
# different interfaces as the ``provided`` interface while
# doing registrations, and ``registered`` performs exact
# matches on all the arguments it receives.
old_view = None
order, phash = view_intr['order'], view_intr['phash']
registered = self.registry.adapters.registered
for view_type in (IView, ISecuredView, IMultiView):
old_view = registered(
(classifier, request_iface, r_context), view_type, name
)
if old_view is not None:
break
old_phash = getattr(old_view, '__phash__', DEFAULT_PHASH)
is_multiview = IMultiView.providedBy(old_view)
want_multiview = (
is_multiview
# no component was yet registered for exactly this triad
# or only one was registered but with the same phash, meaning
# that this view is an override
or (old_view is not None and old_phash != phash)
)
if not want_multiview:
if hasattr(derived_view, '__call_permissive__'):
view_iface = ISecuredView
else:
view_iface = IView
self.registry.registerAdapter(
derived_view,
(classifier, request_iface, context),
view_iface,
name,
)
else:
# - A view or multiview was already registered for this
# triad, and the new view is not an override.
# XXX we could try to be more efficient here and register
# a non-secured view for a multiview if none of the
# multiview's constituent views have a permission
# associated with them, but this code is getting pretty
# rough already
if is_multiview:
multiview = old_view
else:
multiview = MultiView(name)
old_accept = getattr(old_view, '__accept__', None)
old_order = getattr(old_view, '__order__', MAX_ORDER)
# don't bother passing accept_order here as we know we're
# adding another one right after which will re-sort
multiview.add(old_view, old_order, old_phash, old_accept)
accept_order = self.registry.queryUtility(IAcceptOrder)
multiview.add(derived_view, order, phash, accept, accept_order)
for view_type in (IView, ISecuredView):
# unregister any existing views
self.registry.adapters.unregister(
(classifier, request_iface, r_context),
view_type,
name=name,
)
self.registry.registerAdapter(
multiview,
(classifier, request_iface, context),
IMultiView,
name=name,
)
if mapper:
mapper_intr = self.introspectable(
'view mappers',
discriminator,
'view mapper for %s' % view_desc,
'view mapper',
)
mapper_intr['mapper'] = mapper
mapper_intr.relate('views', discriminator)
introspectables.append(mapper_intr)
if route_name:
view_intr.relate('routes', route_name) # see add_route
if renderer is not None and renderer.name and '.' in renderer.name:
# the renderer is a template
tmpl_intr = self.introspectable(
'templates', discriminator, renderer.name, 'template'
)
tmpl_intr.relate('views', discriminator)
tmpl_intr['name'] = renderer.name
tmpl_intr['type'] = renderer.type
tmpl_intr['renderer'] = renderer
introspectables.append(tmpl_intr)
if permission is not None:
# if a permission exists, register a permission introspectable
perm_intr = self.introspectable(
'permissions', permission, permission, 'permission'
)
perm_intr['value'] = permission
perm_intr.relate('views', discriminator)
introspectables.append(perm_intr)
self.action(discriminator, register, introspectables=introspectables)
def _check_view_options(self, **kw):
# we only need to validate deriver options because the predicates
# were checked by the predlist
derivers = self.registry.getUtility(IViewDerivers)
for deriver in derivers.values():
for opt in getattr(deriver, 'options', []):
kw.pop(opt, None)
if kw:
raise ConfigurationError(f'Unknown view options: {kw}')
def _apply_view_derivers(self, info):
# These derivers are not really derivers and so have fixed order
outer_derivers = [
('attr_wrapped_view', attr_wrapped_view),
('predicated_view', predicated_view),
]
view = info.original_view
derivers = self.registry.getUtility(IViewDerivers)
for name, deriver in reversed(outer_derivers + derivers.sorted()):
view = wraps_view(deriver)(view, info)
return view
    @action_method
    def add_view_predicate(
        self, name, factory, weighs_more_than=None, weighs_less_than=None
    ):
        """
        .. versionadded:: 1.4
        Adds a view predicate factory. The associated view predicate can
        later be named as a keyword argument to
        :meth:`pyramid.config.Configurator.add_view` in the
        ``predicates`` anonymous keyword argument dictionary.
        ``name`` should be the name of the predicate. It must be a valid
        Python identifier (it will be used as a keyword argument to
        ``add_view`` by others).
        ``factory`` should be a :term:`predicate factory` or :term:`dotted
        Python name` which refers to a predicate factory.
        See :ref:`view_and_route_predicates` for more information.
        """
        # Delegate to the shared predicate-registration helper, tagging the
        # predicate as a 'view' predicate (as opposed to a route predicate).
        self._add_predicate(
            'view',
            name,
            factory,
            weighs_more_than=weighs_more_than,
            weighs_less_than=weighs_less_than,
        )
def add_default_view_predicates(self):
p = pyramid.predicates
for name, factory in (
('xhr', p.XHRPredicate),
('request_method', p.RequestMethodPredicate),
('path_info', p.PathInfoPredicate),
('request_param', p.RequestParamPredicate),
('header', p.HeaderPredicate),
('accept', p.AcceptPredicate),
('containment', p.ContainmentPredicate),
('request_type', p.RequestTypePredicate),
('match_param', p.MatchParamPredicate),
('physical_path', p.PhysicalPathPredicate),
('is_authenticated', p.IsAuthenticatedPredicate),
('effective_principals', p.EffectivePrincipalsPredicate),
('custom', p.CustomPredicate),
):
self.add_view_predicate(name, factory)
def add_default_accept_view_order(self):
for accept in (
'text/html',
'application/xhtml+xml',
'application/xml',
'text/xml',
'text/plain',
'application/json',
):
self.add_accept_view_order(accept)
    @action_method
    def add_accept_view_order(
        self, value, weighs_more_than=None, weighs_less_than=None
    ):
        """
        Specify an ordering preference for the ``accept`` view option used
        during :term:`view lookup`.
        By default, if two views have different ``accept`` options and a
        request specifies ``Accept: */*`` or omits the header entirely then
        it is random which view will be selected. This method provides a way
        to specify a server-side, relative ordering between accept media types.
        ``value`` should be a :term:`media type` as specified by
        :rfc:`7231#section-5.3.2`. For example, ``text/plain;charset=utf8``,
        ``application/json`` or ``text/html``.
        ``weighs_more_than`` and ``weighs_less_than`` control the ordering
        of media types. Each value may be a string or a list of strings. If
        all options for ``weighs_more_than`` (or ``weighs_less_than``) cannot
        be found, it is an error.
        Earlier calls to ``add_accept_view_order`` are given higher priority
        over later calls, assuming similar constraints but standard conflict
        resolution mechanisms can be used to override constraints.
        See :ref:`accept_content_negotiation` for more information.
        .. versionadded:: 1.10
        """
        # Reject orderings that compare a parameterized media type with a
        # bare one, or parameters across different media types.  NOTE: this
        # closure reads offer_type/offer_subtype/offer_params, which are
        # bound below (before the first call via normalize_types).
        def check_type(than):
            than_type, than_subtype, than_params = Accept.parse_offer(than)
            # text/plain vs text/html;charset=utf8
            if bool(offer_params) ^ bool(than_params):
                raise ConfigurationError(
                    'cannot compare a media type with params to one without '
                    'params'
                )
            # text/plain;charset=utf8 vs text/html;charset=utf8
            if offer_params and (
                offer_subtype != than_subtype or offer_type != than_type
            ):
                raise ConfigurationError(
                    'cannot compare params across different media types'
                )
        # Normalize each constraint to canonical offer form and validate it
        # against the offer being registered.
        def normalize_types(thans):
            thans = [normalize_accept_offer(than) for than in thans]
            for than in thans:
                check_type(than)
            return thans
        value = normalize_accept_offer(value)
        # Parsed components of ``value``; captured by check_type above.
        offer_type, offer_subtype, offer_params = Accept.parse_offer(value)
        if weighs_more_than:
            if not is_nonstr_iter(weighs_more_than):
                weighs_more_than = [weighs_more_than]
            weighs_more_than = normalize_types(weighs_more_than)
        if weighs_less_than:
            if not is_nonstr_iter(weighs_less_than):
                weighs_less_than = [weighs_less_than]
            weighs_less_than = normalize_types(weighs_less_than)
        discriminator = ('accept view order', value)
        intr = self.introspectable(
            'accept view order', value, value, 'accept view order'
        )
        intr['value'] = value
        intr['weighs_more_than'] = weighs_more_than
        intr['weighs_less_than'] = weighs_less_than
        # Deferred registration: lazily create the shared IAcceptOrder
        # topological sorter and add this offer with its constraints.
        def register():
            sorter = self.registry.queryUtility(IAcceptOrder)
            if sorter is None:
                sorter = TopologicalSorter()
                self.registry.registerUtility(sorter, IAcceptOrder)
            sorter.add(
                value, value, before=weighs_more_than, after=weighs_less_than
            )
        self.action(
            discriminator,
            register,
            introspectables=(intr,),
            order=PHASE1_CONFIG,
        )  # must be registered before add_view
    @action_method
    def add_view_deriver(self, deriver, name=None, under=None, over=None):
        """
        .. versionadded:: 1.7
        Add a :term:`view deriver` to the view pipeline. View derivers are
        a feature used by extension authors to wrap views in custom code
        controllable by view-specific options.
        ``deriver`` should be a callable conforming to the
        :class:`pyramid.interfaces.IViewDeriver` interface.
        ``name`` should be the name of the view deriver.  There are no
        restrictions on the name of a view deriver. If left unspecified, the
        name will be constructed from the name of the ``deriver``.
        The ``under`` and ``over`` options can be used to control the ordering
        of view derivers by providing hints about where in the view pipeline
        the deriver is used. Each option may be a string or a list of strings.
        At least one view deriver in each, the over and under directions, must
        exist to fully satisfy the constraints.
        ``under`` means closer to the user-defined :term:`view callable`,
        and ``over`` means closer to view pipeline ingress.
        The default value for ``over`` is ``rendered_view`` and ``under`` is
        ``decorated_view``. This places the deriver somewhere between the two
        in the view pipeline. If the deriver should be placed elsewhere in the
        pipeline, such as above ``decorated_view``, then you MUST also specify
        ``under`` to something earlier in the order, or a
        ``CyclicDependencyError`` will be raised when trying to sort the
        derivers.
        See :ref:`view_derivers` for more information.
        """
        deriver = self.maybe_dotted(deriver)
        if name is None:
            name = deriver.__name__
        # INGRESS and VIEW are the sentinel endpoints of the pipeline and
        # may not be shadowed by a user-supplied deriver name.
        if name in (INGRESS, VIEW):
            raise ConfigurationError(
                '%s is a reserved view deriver name' % name
            )
        if under is None:
            under = 'decorated_view'
        if over is None:
            over = 'rendered_view'
        # Constraints are normalized to sorted tuples so they hash/compare
        # consistently in the topological sorter.
        over = as_sorted_tuple(over)
        under = as_sorted_tuple(under)
        if INGRESS in over:
            raise ConfigurationError('%s cannot be over INGRESS' % name)
        # ensure everything is always over mapped_view
        if VIEW in over and name != 'mapped_view':
            over = as_sorted_tuple(over + ('mapped_view',))
        if VIEW in under:
            raise ConfigurationError('%s cannot be under VIEW' % name)
        if 'mapped_view' in under:
            raise ConfigurationError('%s cannot be under "mapped_view"' % name)
        discriminator = ('view deriver', name)
        intr = self.introspectable('view derivers', name, name, 'view deriver')
        intr['name'] = name
        intr['deriver'] = deriver
        intr['under'] = under
        intr['over'] = over
        # Deferred registration: lazily create the shared IViewDerivers
        # topological sorter (bounded by the INGRESS/VIEW sentinels) and add
        # this deriver with its ordering constraints.
        def register():
            derivers = self.registry.queryUtility(IViewDerivers)
            if derivers is None:
                derivers = TopologicalSorter(
                    default_before=None,
                    default_after=INGRESS,
                    first=INGRESS,
                    last=VIEW,
                )
                self.registry.registerUtility(derivers, IViewDerivers)
            derivers.add(name, deriver, before=over, after=under)
        self.action(
            discriminator,
            register,
            introspectables=(intr,),
            order=PHASE1_CONFIG,
        )  # must be registered before add_view
def add_default_view_derivers(self):
d = pyramid.viewderivers
derivers = [
('secured_view', d.secured_view),
('owrapped_view', d.owrapped_view),
('http_cached_view', d.http_cached_view),
('decorated_view', d.decorated_view),
('rendered_view', d.rendered_view),
('mapped_view', d.mapped_view),
]
last = INGRESS
for name, deriver in derivers:
self.add_view_deriver(deriver, name=name, under=last, over=VIEW)
last = name
# leave the csrf_view loosely coupled to the rest of the pipeline
# by ensuring nothing in the default pipeline depends on the order
# of the csrf_view
self.add_view_deriver(
d.csrf_view,
'csrf_view',
under='secured_view',
over='owrapped_view',
)
def derive_view(self, view, attr=None, renderer=None):
"""
Create a :term:`view callable` using the function, instance,
or class (or :term:`dotted Python name` referring to the same)
provided as ``view`` object.
.. warning::
This method is typically only used by :app:`Pyramid` framework
extension authors, not by :app:`Pyramid` application developers.
This is API is useful to framework extenders who create
pluggable systems which need to register 'proxy' view
callables for functions, instances, or classes which meet the
requirements of being a :app:`Pyramid` view callable. For
example, a ``some_other_framework`` function in another
framework may want to allow a user to supply a view callable,
but he may want to wrap the view callable in his own before
registering the wrapper as a :app:`Pyramid` view callable.
Because a :app:`Pyramid` view callable can be any of a
number of valid objects, the framework extender will not know
how to call the user-supplied object. Running it through
``derive_view`` normalizes it to a callable which accepts two
arguments: ``context`` and ``request``.
For example:
.. code-block:: python
def some_other_framework(user_supplied_view):
config = Configurator(reg)
proxy_view = config.derive_view(user_supplied_view)
def my_wrapper(context, request):
do_something_that_mutates(request)
return proxy_view(context, request)
config.add_view(my_wrapper)
The ``view`` object provided should be one of the following:
- A function or another non-class callable object that accepts
a :term:`request` as a single positional argument and which
returns a :term:`response` object.
- A function or other non-class callable object that accepts
two positional arguments, ``context, request`` and which
returns a :term:`response` object.
- A class which accepts a single positional argument in its
constructor named ``request``, and which has a ``__call__``
method that accepts no arguments that returns a
:term:`response` object.
- A class which accepts two positional arguments named
``context, request``, and which has a ``__call__`` method
that accepts no arguments that returns a :term:`response`
object.
- A :term:`dotted Python name` which refers to any of the
kinds of objects above.
This API returns a callable which accepts the arguments
``context, request`` and which returns the result of calling
the provided ``view`` object.
The ``attr`` keyword argument is most useful when the view
object is a class. It names the method that should be used as
the callable. If ``attr`` is not provided, the attribute
effectively defaults to ``__call__``. See
:ref:`class_as_view` for more information.
The ``renderer`` keyword argument should be a renderer
name. If supplied, it will cause the returned callable to use
a :term:`renderer` to convert the user-supplied view result to
a :term:`response` object. If a ``renderer`` argument is not
supplied, the user-supplied view must itself return a
:term:`response` object."""
return self._derive_view(view, attr=attr, renderer=renderer)
# b/w compat
def _derive_view(
self,
view,
permission=None,
predicates=(),
attr=None,
renderer=None,
wrapper_viewname=None,
viewname=None,
accept=None,
order=MAX_ORDER,
phash=DEFAULT_PHASH,
decorator=None,
route_name=None,
mapper=None,
http_cache=None,
context=None,
require_csrf=None,
exception_only=False,
extra_options=None,
):
view = self.maybe_dotted(view)
mapper = self.maybe_dotted(mapper)
if isinstance(renderer, str):
renderer = renderers.RendererHelper(
name=renderer, package=self.package, registry=self.registry
)
if renderer is None:
# use default renderer if one exists
if self.registry.queryUtility(IRendererFactory) is not None:
renderer = renderers.RendererHelper(
name=None, package=self.package, registry=self.registry
)
options = dict(
view=view,
context=context,
permission=permission,
attr=attr,
renderer=renderer,
wrapper=wrapper_viewname,
name=viewname,
accept=accept,
mapper=mapper,
decorator=decorator,
http_cache=http_cache,
require_csrf=require_csrf,
route_name=route_name,
)
if extra_options:
options.update(extra_options)
info = ViewDeriverInfo(
view=view,
registry=self.registry,
package=self.package,
predicates=predicates,
exception_only=exception_only,
options=options,
)
# order and phash are only necessary for the predicated view and
# are not really view deriver options
info.order = order
info.phash = phash
return self._apply_view_derivers(info)
@viewdefaults
@action_method
def add_forbidden_view(
self,
view=None,
attr=None,
renderer=None,
wrapper=None,
route_name=None,
request_type=None,
request_method=None,
request_param=None,
containment=None,
xhr=None,
accept=None,
header=None,
path_info=None,
custom_predicates=(),
decorator=None,
mapper=None,
match_param=None,
**view_options,
):
"""Add a forbidden view to the current configuration state. The
view will be called when Pyramid or application code raises a
:exc:`pyramid.httpexceptions.HTTPForbidden` exception and the set of
circumstances implied by the predicates provided are matched. The
simplest example is:
.. code-block:: python
def forbidden(request):
return Response('Forbidden', status='403 Forbidden')
config.add_forbidden_view(forbidden)
If ``view`` argument is not provided, the view callable defaults to
:func:`~pyramid.httpexceptions.default_exceptionresponse_view`.
All arguments have the same meaning as
:meth:`pyramid.config.Configurator.add_view` and each predicate
argument restricts the set of circumstances under which this forbidden
view will be invoked. Unlike
:meth:`pyramid.config.Configurator.add_view`, this method will raise
an exception if passed ``name``, ``permission``, ``require_csrf``,
``context``, ``for_``, or ``exception_only`` keyword arguments. These
argument values make no sense in the context of a forbidden
:term:`exception view`.
.. versionadded:: 1.3
.. versionchanged:: 1.8
The view is created using ``exception_only=True``.
"""
for arg in (
'name',
'permission',
'context',
'for_',
'require_csrf',
'exception_only',
):
if arg in view_options:
raise ConfigurationError(
'%s may not be used as an argument to add_forbidden_view'
% (arg,)
)
if view is None:
view = default_exceptionresponse_view
settings = dict(
view=view,
context=HTTPForbidden,
exception_only=True,
wrapper=wrapper,
request_type=request_type,
request_method=request_method,
request_param=request_param,
containment=containment,
xhr=xhr,
accept=accept,
header=header,
path_info=path_info,
custom_predicates=custom_predicates,
decorator=decorator,
mapper=mapper,
match_param=match_param,
route_name=route_name,
permission=NO_PERMISSION_REQUIRED,
require_csrf=False,
attr=attr,
renderer=renderer,
)
settings.update(view_options)
return self.add_view(**settings)
set_forbidden_view = add_forbidden_view # deprecated sorta-bw-compat alias
@viewdefaults
@action_method
def add_notfound_view(
self,
view=None,
attr=None,
renderer=None,
wrapper=None,
route_name=None,
request_type=None,
request_method=None,
request_param=None,
containment=None,
xhr=None,
accept=None,
header=None,
path_info=None,
custom_predicates=(),
decorator=None,
mapper=None,
match_param=None,
append_slash=False,
**view_options,
):
"""Add a default :term:`Not Found View` to the current configuration
state. The view will be called when Pyramid or application code raises
an :exc:`pyramid.httpexceptions.HTTPNotFound` exception (e.g., when a
view cannot be found for the request). The simplest example is:
.. code-block:: python
def notfound(request):
return Response('Not Found', status='404 Not Found')
config.add_notfound_view(notfound)
If ``view`` argument is not provided, the view callable defaults to
:func:`~pyramid.httpexceptions.default_exceptionresponse_view`.
All arguments except ``append_slash`` have the same meaning as
:meth:`pyramid.config.Configurator.add_view` and each predicate
argument restricts the set of circumstances under which this notfound
view will be invoked. Unlike
:meth:`pyramid.config.Configurator.add_view`, this method will raise
an exception if passed ``name``, ``permission``, ``require_csrf``,
``context``, ``for_``, or ``exception_only`` keyword arguments. These
argument values make no sense in the context of a Not Found View.
If ``append_slash`` is ``True``, when this Not Found View is invoked,
and the current path info does not end in a slash, the notfound logic
will attempt to find a :term:`route` that matches the request's path
info suffixed with a slash. If such a route exists, Pyramid will
issue a redirect to the URL implied by the route; if it does not,
Pyramid will return the result of the view callable provided as
``view``, as normal.
If the argument provided as ``append_slash`` is not a boolean but
instead implements :class:`~pyramid.interfaces.IResponse`, the
append_slash logic will behave as if ``append_slash=True`` was passed,
but the provided class will be used as the response class instead of
the default :class:`~pyramid.httpexceptions.HTTPTemporaryRedirect`
response class when a redirect is performed. For example:
.. code-block:: python
from pyramid.httpexceptions import HTTPMovedPermanently
config.add_notfound_view(append_slash=HTTPMovedPermanently)
The above means that a redirect to a slash-appended route will be
attempted, but instead of
:class:`~pyramid.httpexceptions.HTTPTemporaryRedirect`
being used, :class:`~pyramid.httpexceptions.HTTPMovedPermanently will
be used` for the redirect response if a slash-appended route is found.
:class:`~pyramid.httpexceptions.HTTPTemporaryRedirect` class is used
as default response, which is equivalent to
:class:`~pyramid.httpexceptions.HTTPFound` with addition of redirecting
with the same HTTP method (useful when doing POST requests).
.. versionadded:: 1.3
.. versionchanged:: 1.6
The ``append_slash`` argument was modified to allow any object that
implements the ``IResponse`` interface to specify the response class
used when a redirect is performed.
.. versionchanged:: 1.8
The view is created using ``exception_only=True``.
.. versionchanged: 1.10
Default response was changed from
:class:`~pyramid.httpexceptions.HTTPFound`
to :class:`~pyramid.httpexceptions.HTTPTemporaryRedirect`.
"""
for arg in (
'name',
'permission',
'context',
'for_',
'require_csrf',
'exception_only',
):
if arg in view_options:
raise ConfigurationError(
'%s may not be used as an argument to add_notfound_view'
% (arg,)
)
if view is None:
view = default_exceptionresponse_view
settings = dict(
view=view,
context=HTTPNotFound,
exception_only=True,
wrapper=wrapper,
request_type=request_type,
request_method=request_method,
request_param=request_param,
containment=containment,
xhr=xhr,
accept=accept,
header=header,
path_info=path_info,
custom_predicates=custom_predicates,
decorator=decorator,
mapper=mapper,
match_param=match_param,
route_name=route_name,
permission=NO_PERMISSION_REQUIRED,
require_csrf=False,
)
settings.update(view_options)
if append_slash:
view = self._derive_view(view, attr=attr, renderer=renderer)
if IResponse.implementedBy(append_slash):
view = AppendSlashNotFoundViewFactory(
view, redirect_class=append_slash
)
else:
view = AppendSlashNotFoundViewFactory(view)
settings['view'] = view
else:
settings['attr'] = attr
settings['renderer'] = renderer
return self.add_view(**settings)
set_notfound_view = add_notfound_view # deprecated sorta-bw-compat alias
@viewdefaults
@action_method
def add_exception_view(
self,
view=None,
context=None,
# force all other arguments to be specified as key=value
**view_options,
):
"""Add an :term:`exception view` for the specified ``exception`` to
the current configuration state. The view will be called when Pyramid
or application code raises the given exception.
This method accepts almost all of the same arguments as
:meth:`pyramid.config.Configurator.add_view` except for ``name``,
``permission``, ``for_``, ``require_csrf``, and ``exception_only``.
By default, this method will set ``context=Exception``, thus
registering for most default Python exceptions. Any subclass of
``Exception`` may be specified.
.. versionadded:: 1.8
"""
for arg in (
'name',
'for_',
'exception_only',
'require_csrf',
'permission',
):
if arg in view_options:
raise ConfigurationError(
'%s may not be used as an argument to add_exception_view'
% (arg,)
)
if context is None:
context = Exception
view_options.update(
dict(
view=view,
context=context,
exception_only=True,
permission=NO_PERMISSION_REQUIRED,
require_csrf=False,
)
)
return self.add_view(**view_options)
@action_method
def set_view_mapper(self, mapper):
"""
Setting a :term:`view mapper` makes it possible to make use of
:term:`view callable` objects which implement different call
signatures than the ones supported by :app:`Pyramid` as described in
its narrative documentation.
The ``mapper`` argument should be an object implementing
:class:`pyramid.interfaces.IViewMapperFactory` or a :term:`dotted
Python name` to such an object. The provided ``mapper`` will become
the default view mapper to be used by all subsequent :term:`view
configuration` registrations.
.. seealso::
See also :ref:`using_a_view_mapper`.
.. note::
Using the ``default_view_mapper`` argument to the
:class:`pyramid.config.Configurator` constructor
can be used to achieve the same purpose.
"""
mapper = self.maybe_dotted(mapper)
def register():
self.registry.registerUtility(mapper, IViewMapperFactory)
# IViewMapperFactory is looked up as the result of view config
# in phase 3
intr = self.introspectable(
'view mappers',
IViewMapperFactory,
self.object_description(mapper),
'default view mapper',
)
intr['mapper'] = mapper
self.action(
IViewMapperFactory,
register,
order=PHASE1_CONFIG,
introspectables=(intr,),
)
@action_method
def add_static_view(self, name, path, **kw):
"""Add a view used to render static assets such as images
and CSS files.
The ``name`` argument is a string representing an
application-relative local URL prefix. It may alternately be a full
URL.
The ``path`` argument is the path on disk where the static files
reside. This can be an absolute path, a package-relative path, or a
:term:`asset specification`.
The ``cache_max_age`` keyword argument is input to set the
``Expires`` and ``Cache-Control`` headers for static assets served.
Note that this argument has no effect when the ``name`` is a *url
prefix*. By default, this argument is ``None``, meaning that no
particular Expires or Cache-Control headers are set in the response.
The ``content_encodings`` keyword argument is a list of alternative
file encodings supported in the ``Accept-Encoding`` HTTP Header.
Alternative files are found using file extensions defined in
:attr:`mimetypes.encodings_map`. An encoded asset will be returned
with the ``Content-Encoding`` header set to the selected encoding.
If the asset contains alternative encodings then the
``Accept-Encoding`` value will be added to the response's ``Vary``
header. By default, the list is empty and no alternatives will be
supported.
The ``permission`` keyword argument is used to specify the
:term:`permission` required by a user to execute the static view. By
default, it is the string
:data:`pyramid.security.NO_PERMISSION_REQUIRED`, a special sentinel
which indicates that, even if a :term:`default permission` exists for
the current application, the static view should be renderered to
completely anonymous users. This default value is permissive
because, in most web apps, static assets seldom need protection from
viewing. If ``permission`` is specified, the security checking will
be performed against the default root factory ACL.
Any other keyword arguments sent to ``add_static_view`` are passed on
to :meth:`pyramid.config.Configurator.add_route` (e.g. ``factory``,
perhaps to define a custom factory with a custom ACL for this static
view).
*Usage*
The ``add_static_view`` function is typically used in conjunction
with the :meth:`pyramid.request.Request.static_url` method.
``add_static_view`` adds a view which renders a static asset when
some URL is visited; :meth:`pyramid.request.Request.static_url`
generates a URL to that asset.
The ``name`` argument to ``add_static_view`` is usually a simple URL
prefix (e.g. ``'images'``). When this is the case, the
:meth:`pyramid.request.Request.static_url` API will generate a URL
which points to a Pyramid view, which will serve up a set of assets
that live in the package itself. For example:
.. code-block:: python
add_static_view('images', 'mypackage:images/')
Code that registers such a view can generate URLs to the view via
:meth:`pyramid.request.Request.static_url`:
.. code-block:: python
request.static_url('mypackage:images/logo.png')
When ``add_static_view`` is called with a ``name`` argument that
represents a URL prefix, as it is above, subsequent calls to
:meth:`pyramid.request.Request.static_url` with paths that start with
the ``path`` argument passed to ``add_static_view`` will generate a
URL something like ``http://<Pyramid app URL>/images/logo.png``,
which will cause the ``logo.png`` file in the ``images`` subdirectory
of the ``mypackage`` package to be served.
``add_static_view`` can alternately be used with a ``name`` argument
which is a *URL*, causing static assets to be served from an external
webserver. This happens when the ``name`` argument is a fully
qualified URL (e.g. starts with ``http://`` or similar). In this
mode, the ``name`` is used as the prefix of the full URL when
generating a URL using :meth:`pyramid.request.Request.static_url`.
Furthermore, if a protocol-relative URL (e.g. ``//example.com/images``)
is used as the ``name`` argument, the generated URL will use the
protocol of the request (http or https, respectively).
For example, if ``add_static_view`` is called like so:
.. code-block:: python
add_static_view('http://example.com/images', 'mypackage:images/')
Subsequently, the URLs generated by
:meth:`pyramid.request.Request.static_url` for that static view will
be prefixed with ``http://example.com/images`` (the external webserver
listening on ``example.com`` must be itself configured to respond
properly to such a request.):
.. code-block:: python
static_url('mypackage:images/logo.png', request)
See :ref:`static_assets_section` for more information.
.. versionchanged:: 2.0
Added the ``content_encodings`` argument.
"""
spec = self._make_spec(path)
info = self._get_static_info()
info.add(self, name, spec, **kw)
def add_cache_buster(self, path, cachebust, explicit=False):
"""
Add a cache buster to a set of files on disk.
The ``path`` should be the path on disk where the static files
reside. This can be an absolute path, a package-relative path, or a
:term:`asset specification`.
The ``cachebust`` argument may be set to cause
:meth:`~pyramid.request.Request.static_url` to use cache busting when
generating URLs. See :ref:`cache_busting` for general information
about cache busting. The value of the ``cachebust`` argument must
be an object which implements
:class:`~pyramid.interfaces.ICacheBuster`.
If ``explicit`` is set to ``True`` then the ``path`` for the cache
buster will be matched based on the ``rawspec`` instead of the
``pathspec`` as defined in the
:class:`~pyramid.interfaces.ICacheBuster` interface.
Default: ``False``.
.. versionadded:: 1.6
"""
spec = self._make_spec(path)
info = self._get_static_info()
info.add_cache_buster(self, spec, cachebust, explicit=explicit)
def _get_static_info(self):
info = self.registry.queryUtility(IStaticURLInfo)
if info is None:
info = StaticURLInfo()
self.registry.registerUtility(info, IStaticURLInfo)
return info
def isexception(o):
if IInterface.providedBy(o):
if IException.isEqualOrExtendedBy(o):
return True
return isinstance(o, Exception) or (
inspect.isclass(o) and (issubclass(o, Exception))
)
def runtime_exc_view(view, excview):
# create a view callable which can pretend to be both a normal view
# and an exception view, dispatching to the appropriate one based
# on the state of request.exception
def wrapper_view(context, request):
if getattr(request, 'exception', None):
return excview(context, request)
return view(context, request)
# these constants are the same between the two views
wrapper_view.__wraps__ = wrapper_view
wrapper_view.__original_view__ = getattr(view, '__original_view__', view)
wrapper_view.__module__ = view.__module__
wrapper_view.__doc__ = view.__doc__
wrapper_view.__name__ = view.__name__
wrapper_view.__accept__ = getattr(view, '__accept__', None)
wrapper_view.__order__ = getattr(view, '__order__', MAX_ORDER)
wrapper_view.__phash__ = getattr(view, '__phash__', DEFAULT_PHASH)
wrapper_view.__view_attr__ = getattr(view, '__view_attr__', None)
wrapper_view.__permission__ = getattr(view, '__permission__', None)
def wrap_fn(attr):
def wrapper(context, request):
if getattr(request, 'exception', None):
selected_view = excview
else:
selected_view = view
fn = getattr(selected_view, attr, None)
if fn is not None:
return fn(context, request)
return wrapper
# these methods are dynamic per-request and should dispatch to their
# respective views based on whether it's an exception or not
wrapper_view.__call_permissive__ = wrap_fn('__call_permissive__')
wrapper_view.__permitted__ = wrap_fn('__permitted__')
wrapper_view.__predicated__ = wrap_fn('__predicated__')
wrapper_view.__predicates__ = wrap_fn('__predicates__')
return wrapper_view
@implementer(IViewDeriverInfo)
| ViewsConfiguratorMixin |
python | pyqtgraph__pyqtgraph | pyqtgraph/multiprocess/remoteproxy.py | {
"start": 46863,
"end": 48917
} | class ____(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
This takes a lot longer than running the equivalent code locally. To
speed things up, we can 'defer' the two attribute lookups so they are
only carried out when neccessary::
rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
rsys.stdout.write('hello')
This example only makes two requests to the remote process; the two
attribute lookups immediately return DeferredObjectProxy instances
immediately without contacting the remote process. When the call
to write() is made, all attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
| DeferredObjectProxy |
python | kamyu104__LeetCode-Solutions | Python/closest-binary-search-tree-value-ii.py | {
"start": 1916,
"end": 4374
} | class ____(object):
def closestKValues(self, root, target, k):
"""
:type root: TreeNode
:type target: float
:type k: int
:rtype: List[int]
"""
# Helper class to make a stack to the next node.
class BSTIterator:
# @param root, a binary search tree's root node
def __init__(self, stack, child1, child2):
self.stack = list(stack)
self.cur = self.stack.pop()
self.child1 = child1
self.child2 = child2
# @return an integer, the next node
def next(self):
node = None
if self.cur and self.child1(self.cur):
self.stack.append(self.cur)
node = self.child1(self.cur)
while self.child2(node):
self.stack.append(node)
node = self.child2(node)
elif self.stack:
prev = self.cur
node = self.stack.pop()
while node:
if self.child2(node) is prev:
break
else:
prev = node
node = self.stack.pop() if self.stack else None
self.cur = node
return node
# Build the stack to the closet node.
stack = []
while root:
stack.append(root)
root = root.left if target < root.val else root.right
dist = lambda node: abs(node.val - target) if node else float("inf")
stack = stack[:stack.index(min(stack, key=dist))+1]
# The forward or backward iterator.
backward = lambda node: node.left
forward = lambda node: node.right
smaller_it, larger_it = BSTIterator(stack, backward, forward), BSTIterator(stack, forward, backward)
smaller_node, larger_node = smaller_it.next(), larger_it.next()
# Get the closest k values by advancing the iterators of the stacks.
result = [stack[-1].val]
for _ in xrange(k - 1):
if dist(smaller_node) < dist(larger_node):
result.append(smaller_node.val)
smaller_node = smaller_it.next()
else:
result.append(larger_node.val)
larger_node = larger_it.next()
return result
| Solution2 |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 80955,
"end": 92155
} | class ____:
"""Ensure we show the correct percentages for tests that fail during teardown (#3088)"""
@pytest.fixture
def contest_with_teardown_fixture(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def fail_teardown():
yield
assert False
"""
)
@pytest.fixture
def many_files(self, pytester: Pytester, contest_with_teardown_fixture) -> None:
pytester.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_bar(fail_teardown, i):
pass
""",
test_foo="""
import pytest
@pytest.mark.parametrize('i', range(15))
def test_foo(fail_teardown, i):
pass
""",
)
def test_teardown_simple(
self, pytester: Pytester, contest_with_teardown_fixture
) -> None:
pytester.makepyfile(
"""
def test_foo(fail_teardown):
pass
"""
)
output = pytester.runpytest()
output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])
def test_teardown_with_test_also_failing(
self, pytester: Pytester, contest_with_teardown_fixture
) -> None:
pytester.makepyfile(
"""
def test_foo(fail_teardown):
assert 0
"""
)
output = pytester.runpytest("-rfE")
output.stdout.re_match_lines(
[
r"test_teardown_with_test_also_failing.py FE\s+\[100%\]",
"FAILED test_teardown_with_test_also_failing.py::test_foo - assert 0",
"ERROR test_teardown_with_test_also_failing.py::test_foo - assert False",
]
)
def test_teardown_many(self, pytester: Pytester, many_files) -> None:
output = pytester.runpytest()
output.stdout.re_match_lines(
[r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
)
def test_teardown_many_verbose(
self, pytester: Pytester, many_files, color_mapping
) -> None:
result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
"test_bar.py::test_bar[0] PASSED * [ 5%]",
"test_bar.py::test_bar[0] ERROR * [ 5%]",
"test_bar.py::test_bar[4] PASSED * [ 25%]",
"test_foo.py::test_foo[14] PASSED * [100%]",
"test_foo.py::test_foo[14] ERROR * [100%]",
"=* 20 passed, 20 errors in *",
]
)
)
def test_xdist_normal(self, many_files, pytester: Pytester, monkeypatch) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
def test_skip_reasons_folding() -> None:
path = "xyz"
lineno = 3
message = "justso"
longrepr = (path, lineno, message)
class X:
pass
ev1 = cast(CollectReport, X())
ev1.when = "execute"
ev1.skipped = True # type: ignore[misc]
ev1.longrepr = longrepr
ev2 = cast(CollectReport, X())
ev2.when = "execute"
ev2.longrepr = longrepr
ev2.skipped = True # type: ignore[misc]
# ev3 might be a collection report
ev3 = cast(CollectReport, X())
ev3.when = "collect"
ev3.longrepr = longrepr
ev3.skipped = True # type: ignore[misc]
values = _folded_skips(Path.cwd(), [ev1, ev2, ev3])
assert len(values) == 1
num, fspath, lineno_, reason = values[0]
assert num == 3
assert fspath == path
assert lineno_ == lineno
assert reason == message
def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
mocked_verbose_word = "FAILED"
mocked_pos = "some::nodeid"
def mock_get_pos(*args):
return mocked_pos
monkeypatch.setattr(_pytest.terminal, "_get_node_id_with_markup", mock_get_pos)
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class config:
def __init__(self):
self.option = Namespace(verbose=0)
class rep:
def _get_verbose_word_with_markup(self, *args):
return mocked_verbose_word, {}
class longrepr:
class reprcrash:
pass
def check(msg, width, expected):
class DummyTerminalWriter:
fullwidth = width
def markup(self, word: str, **markup: str):
return word
__tracebackhide__ = True
if msg:
rep.longrepr.reprcrash.message = msg # type: ignore
actual = _get_line_with_reprcrash_message(
config(), # type: ignore[arg-type]
rep(), # type: ignore[arg-type]
DummyTerminalWriter(), # type: ignore[arg-type]
{},
)
assert actual == expected
if actual != f"{mocked_verbose_word} {mocked_pos}":
assert len(actual) <= width
assert wcswidth(actual) <= width
# AttributeError with message
check(None, 80, "FAILED some::nodeid")
check("msg", 80, "FAILED some::nodeid - msg")
check("msg", 3, "FAILED some::nodeid")
check("msg", 24, "FAILED some::nodeid")
check("msg", 25, "FAILED some::nodeid - msg")
check("some longer msg", 24, "FAILED some::nodeid")
check("some longer msg", 25, "FAILED some::nodeid - ...")
check("some longer msg", 26, "FAILED some::nodeid - s...")
check("some\nmessage", 25, "FAILED some::nodeid - ...")
check("some\nmessage", 26, "FAILED some::nodeid - some")
check("some\nmessage", 80, "FAILED some::nodeid - some")
# Test unicode safety.
check("🉐🉐🉐🉐🉐\n2nd line", 25, "FAILED some::nodeid - ...")
check("🉐🉐🉐🉐🉐\n2nd line", 26, "FAILED some::nodeid - ...")
check("🉐🉐🉐🉐🉐\n2nd line", 27, "FAILED some::nodeid - 🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 28, "FAILED some::nodeid - 🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED some::nodeid - 🉐🉐...")
# NOTE: constructed, not sure if this is supported.
mocked_pos = "nodeid::🉐::withunicode"
check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED nodeid::🉐::withunicode")
check("🉐🉐🉐🉐🉐\n2nd line", 40, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 41, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 42, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 80, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐🉐🉐")
def test_short_summary_with_verbose(
monkeypatch: MonkeyPatch, pytester: Pytester
) -> None:
"""With -vv do not truncate the summary info (#11777)."""
# On CI we also do not truncate the summary info, monkeypatch it to ensure we
# are testing against the -vv flag on CI.
monkeypatch.setattr(_pytest.terminal, "running_on_ci", lambda: False)
string_length = 200
pytester.makepyfile(
f"""
def test():
s1 = "A" * {string_length}
s2 = "B" * {string_length}
assert s1 == s2
"""
)
# No -vv, summary info should be truncated.
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*short test summary info*",
"* assert 'AAA...",
],
)
# No truncation with -vv.
result = pytester.runpytest("-vv")
result.stdout.fnmatch_lines(
[
"*short test summary info*",
f"*{'A' * string_length}*{'B' * string_length}'",
]
)
def test_full_sequence_print_with_vv(
monkeypatch: MonkeyPatch, pytester: Pytester
) -> None:
"""Do not truncate sequences in summaries with -vv (#11777)."""
monkeypatch.setattr(_pytest.terminal, "running_on_ci", lambda: False)
pytester.makepyfile(
"""
def test_len_list():
l = list(range(10))
assert len(l) == 9
def test_len_dict():
d = dict(zip(range(10), range(10)))
assert len(d) == 9
"""
)
result = pytester.runpytest("-vv")
assert result.ret == 1
result.stdout.fnmatch_lines(
[
"*short test summary info*",
f"*{list(range(10))}*",
f"*{dict(zip(range(10), range(10), strict=True))}*",
]
)
def test_force_short_summary(monkeypatch: MonkeyPatch, pytester: Pytester) -> None:
monkeypatch.setattr(_pytest.terminal, "running_on_ci", lambda: False)
pytester.makepyfile(
"""
def test():
assert "a\\n" * 10 == ""
"""
)
result = pytester.runpytest("-vv", "--force-short-summary")
assert result.ret == 1
result.stdout.fnmatch_lines(
["*short test summary info*", "*AssertionError: assert 'a\\na\\na\\na..."]
)
@pytest.mark.parametrize(
"seconds, expected",
[
(10.0, "10.00s"),
(10.34, "10.34s"),
(59.99, "59.99s"),
(60.55, "60.55s (0:01:00)"),
(123.55, "123.55s (0:02:03)"),
(60 * 60 + 0.5, "3600.50s (1:00:00)"),
],
)
def test_format_session_duration(seconds, expected):
from _pytest.terminal import format_session_duration
assert format_session_duration(seconds) == expected
@pytest.mark.parametrize(
"seconds, expected",
[
(3600 * 100 - 60, " 99h 59m"),
(31 * 60 - 1, " 30m 59s"),
(10.1236, " 10.124s"),
(9.1236, " 9.124s"),
(0.1236, " 123.6ms"),
(0.01236, " 12.36ms"),
(0.001236, " 1.236ms"),
(0.0001236, " 123.6us"),
(0.00001236, " 12.36us"),
(0.000001236, " 1.236us"),
],
)
def test_format_node_duration(seconds: float, expected: str) -> None:
from _pytest.terminal import format_node_duration
assert format_node_duration(seconds) == expected
def test_collecterror(pytester: Pytester) -> None:
p1 = pytester.makepyfile("raise SyntaxError()")
result = pytester.runpytest("-ra", str(p1))
result.stdout.fnmatch_lines(
[
"collected 0 items / 1 error",
"*= ERRORS =*",
"*_ ERROR collecting test_collecterror.py _*",
"E SyntaxError: *",
"*= short test summary info =*",
"ERROR test_collecterror.py",
"*! Interrupted: 1 error during collection !*",
"*= 1 error in *",
]
)
def test_no_summary_collecterror(pytester: Pytester) -> None:
p1 = pytester.makepyfile("raise SyntaxError()")
result = pytester.runpytest("-ra", "--no-summary", str(p1))
result.stdout.no_fnmatch_line("*= ERRORS =*")
def test_via_exec(pytester: Pytester) -> None:
p1 = pytester.makepyfile("exec('def test_via_exec(): pass')")
result = pytester.runpytest(str(p1), "-vv")
result.stdout.fnmatch_lines(
["test_via_exec.py::test_via_exec <- <string> PASSED*", "*= 1 passed in *"]
)
| TestProgressWithTeardown |
python | django__django | tests/utils_tests/test_inspect.py | {
"start": 4488,
"end": 6200
} | class ____(unittest.TestCase):
@classmethod
def _class_method(cls) -> None:
return None
@staticmethod
def _static_method() -> None:
return None
def test_builtin(self):
self.assertIs(inspect.is_module_level_function(any), False)
self.assertIs(inspect.is_module_level_function(isinstance), False)
def test_from_module(self):
self.assertIs(inspect.is_module_level_function(subprocess.run), True)
self.assertIs(inspect.is_module_level_function(subprocess.check_output), True)
self.assertIs(
inspect.is_module_level_function(inspect.is_module_level_function), True
)
def test_private_function(self):
def private_function():
pass
self.assertIs(inspect.is_module_level_function(private_function), False)
def test_coroutine(self):
self.assertIs(inspect.is_module_level_function(aget_object_or_404), True)
def test_method(self):
self.assertIs(inspect.is_module_level_function(self.test_method), False)
self.assertIs(inspect.is_module_level_function(self.setUp), False)
def test_unbound_method(self):
self.assertIs(
inspect.is_module_level_function(self.__class__.test_unbound_method), True
)
self.assertIs(inspect.is_module_level_function(self.__class__.setUp), True)
def test_lambda(self):
self.assertIs(inspect.is_module_level_function(lambda: True), False)
def test_class_and_static_method(self):
self.assertIs(inspect.is_module_level_function(self._static_method), True)
self.assertIs(inspect.is_module_level_function(self._class_method), False)
| IsModuleLevelFunctionTestCase |
python | Lightning-AI__lightning | tests/tests_pytorch/core/test_metric_result_integration.py | {
"start": 9390,
"end": 23143
} | class ____(Metric):
def __init__(self):
super().__init__()
self.add_state("sum", tensor(0), dist_reduce_fx=torch.sum)
self.add_state("count", tensor(0), dist_reduce_fx=torch.sum)
def update(self, increment):
self.sum += increment
self.count += 1
def compute(self):
return self.sum // self.count
def __repr__(self) -> str:
return f"{self.__class__.__name__}(sum={self.sum}, count={self.count})"
def result_collection_reload(default_root_dir, accelerator="auto", devices=1, **kwargs):
class CustomException(Exception):
pass
batches = 5
class ExtendedBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.breaking_batch_idx = 3
self.has_validated_sum = False
self.dummy_metric = DummyMeanMetric()
@property
def results(self):
return self.trainer.fit_loop._results
def training_step(self, batch, batch_idx):
# We run 5 batches, meaning batch_idx from [0..4]
# Without failure, we expect to get `total=sum(range(5))` and `num_batches=5`
# When not restarting, it simulates a failure on `batch_idx=3` and test the state after reload
# Compute `on_epoch_end` would be `10/5=2` if the metric state had been serialized and reloaded
if self.trainer.fit_loop.restarting:
self.log("tracking", batch_idx, on_step=True, on_epoch=True)
self.log("tracking_2", batch_idx, on_step=True, on_epoch=True, sync_dist=True)
self.dummy_metric(batch_idx)
self.log("tracking_metric", self.dummy_metric, on_step=True, on_epoch=True)
value = self.results["training_step.tracking_metric"]
value_2 = self.results["training_step.tracking"]
# On failure, the Metric states are being accumulated on rank 0 and zeroed-out on other ranks.
# The shift indicates we failed while the state was `shift=sign(is_global_zero > 0) * [0..3]`
shift = 0
if devices == 2:
shift = 3 if self.trainer.is_global_zero else -3
expected = sum(range(batch_idx + 1)) + shift
assert expected == value == value_2
else:
if batch_idx == self.breaking_batch_idx:
# simulate failure mid epoch
raise CustomException
self.log("tracking", batch_idx, on_step=True, on_epoch=True)
self.log("tracking_2", batch_idx, on_step=True, on_epoch=True, sync_dist=True)
self.dummy_metric(batch_idx)
self.log("tracking_metric", self.dummy_metric, on_step=True, on_epoch=True)
value = self.results["training_step.tracking"]
assert value == sum(range(batch_idx + 1))
value = self.results["training_step.tracking_2"]
assert value == sum(range(batch_idx + 1))
return super().training_step(batch, batch_idx)
def on_train_epoch_end(self) -> None:
if self.trainer.fit_loop.restarting:
# the state of the results before the exception is not saved and restored, so the total starts after
# the breaking_batch_idx
total = sum(range(self.breaking_batch_idx, batches))
metrics = self.results.metrics(on_step=False)
computed_value = self.dummy_metric.compute()
assert self.results["training_step.tracking"].value == total
expected = total / (batches - self.breaking_batch_idx)
assert metrics["callback"]["tracking"] == expected
assert computed_value == 2
assert self.results["training_step.tracking_2"].value == total
assert metrics["callback"]["tracking_2"] == expected
assert computed_value == 2
self.has_validated_sum = True
model = ExtendedBoringModel()
trainer_kwargs = {
"max_epochs": 1,
"limit_train_batches": batches,
"limit_val_batches": 0,
"accelerator": accelerator,
"devices": devices,
"enable_progress_bar": False,
"enable_model_summary": False,
"default_root_dir": default_root_dir,
"callbacks": OnExceptionCheckpoint(default_root_dir),
}
trainer_kwargs.update(kwargs)
trainer = Trainer(**trainer_kwargs)
with suppress(CustomException):
trainer.fit(model)
assert not model.has_validated_sum
tmp_path = (
trainer.strategy.broadcast(trainer_kwargs["default_root_dir"], 0)
if devices >= 2
else trainer_kwargs["default_root_dir"]
)
ckpt_path = os.path.join(tmp_path, "on_exception.ckpt")
trainer = Trainer(**trainer_kwargs)
trainer.fit(model, ckpt_path=ckpt_path)
assert model.has_validated_sum
@pytest.mark.parametrize(
"kwargs",
[
pytest.param({}, marks=RunIf(mps=False)),
pytest.param({"strategy": "ddp", "accelerator": "gpu", "devices": 1}, marks=RunIf(min_cuda_gpus=1)),
pytest.param(
{"strategy": "ddp", "accelerator": "gpu", "devices": 2}, marks=RunIf(min_cuda_gpus=2, standalone=True)
),
],
)
def test_result_collection_reload(tmp_path, kwargs):
result_collection_reload(default_root_dir=tmp_path, **kwargs)
def test_metric_collections(tmp_path):
"""This test ensures the metric attribute is properly found even with complex nested metric structure."""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.metrics_list = ModuleList([DummyMetric() for _ in range(2)])
self.metrics_dict = ModuleDict({"a": DummyMetric(), "b": DummyMetric()})
self.metrics_collection_dict = MetricCollection({"a": DummyMetric(), "b": DummyMetric()})
self.metrics_collection_dict_nested = ModuleDict({
"a": ModuleList([ModuleDict({"b": DummyMetric()}), DummyMetric()])
})
def training_step(self, batch, batch_idx):
loss = super().training_step(batch, batch_idx)
self.metrics_list[0](batch_idx)
self.metrics_list[1](batch_idx)
self.metrics_dict["a"](batch_idx)
self.metrics_dict["b"](batch_idx)
self.metrics_collection_dict["a"](batch_idx)
self.metrics_collection_dict["b"](batch_idx)
self.metrics_collection_dict_nested["a"][0]["b"](batch_idx)
self.metrics_collection_dict_nested["a"][1](batch_idx)
self.log("a", self.metrics_list[0])
self.log("b", self.metrics_list[1])
self.log("c", self.metrics_dict["a"])
self.log("d", self.metrics_dict["b"])
self.log("e", self.metrics_collection_dict["a"])
self.log("f", self.metrics_collection_dict["b"])
self.log("g", self.metrics_collection_dict_nested["a"][0]["b"])
self.log("h", self.metrics_collection_dict_nested["a"][1])
return loss
def on_train_epoch_end(self) -> None:
results = self.trainer.fit_loop.epoch_loop._results
assert results["training_step.a"].meta.metric_attribute == "metrics_list.0"
assert results["training_step.b"].meta.metric_attribute == "metrics_list.1"
assert results["training_step.c"].meta.metric_attribute == "metrics_dict.a"
assert results["training_step.d"].meta.metric_attribute == "metrics_dict.b"
assert results["training_step.e"].meta.metric_attribute == "metrics_collection_dict.a"
assert results["training_step.f"].meta.metric_attribute == "metrics_collection_dict.b"
assert results["training_step.g"].meta.metric_attribute == "metrics_collection_dict_nested.a.0.b"
assert results["training_step.h"].meta.metric_attribute == "metrics_collection_dict_nested.a.1"
model = TestModel()
trainer = Trainer(default_root_dir=tmp_path, max_epochs=2, limit_train_batches=2, limit_val_batches=0)
trainer.fit(model)
def test_metric_result_computed_check():
"""Unittest ``_get_cache`` with multielement tensors."""
metadata = _Metadata("foo", "bar", on_epoch=True, enable_graph=True)
metadata.sync = _Sync()
rm = _ResultMetric(metadata, is_tensor=True)
computed_value = tensor([1, 2, 3])
rm._computed = computed_value
cache = _ResultCollection._get_cache(rm, on_step=False)
# `enable_graph=True` so no detach, identity works
assert cache is computed_value
@pytest.mark.parametrize(
("default_type", "converted_type"),
[
(torch.half, torch.float),
(torch.float, torch.float),
(torch.double, torch.double),
],
)
def test_metric_result_respects_dtype(default_type, converted_type):
from lightning.pytorch.trainer.connectors.logger_connector.result import warning_cache
warning_cache.clear()
torch.set_default_dtype(default_type)
fixed_dtype = torch.long # default by PyTorch
metadata = _Metadata("foo", "bar")
metadata.sync = _Sync()
rm = _ResultMetric(metadata, is_tensor=True)
assert rm.value.dtype == converted_type
assert rm.cumulated_batch_size.dtype == fixed_dtype
# two fixed point numbers - should be converted
value, batch_size = tensor(2), 3
assert value.dtype == fixed_dtype
with pytest.warns(
UserWarning, match=rf"`self.log\('bar', ...\)` in your `foo` .* Converting it to {converted_type}"
):
rm.update(value, batch_size)
# floating and fixed
rm.update(tensor(4.0), 5)
total = rm.compute()
assert total == (2 * 3 + 4 * 5) / (5 + 3)
assert total.dtype == converted_type
# restore to avoid impacting other tests
torch.set_default_dtype(torch.float)
@pytest.mark.parametrize("reduce_fx", ["mean", sum])
def test_metric_result_dtype_promotion(reduce_fx):
metadata = _Metadata("foo", "bar", reduce_fx=reduce_fx)
metadata.sync = _Sync()
rm = _ResultMetric(metadata, is_tensor=True)
assert rm.value.dtype == torch.float
# log a double
rm.update(tensor(0, dtype=torch.double), 1)
# `rm.value.dtype` is promoted
assert rm.value.dtype == torch.double
# log a float
rm.update(tensor(0, dtype=torch.float), 1)
# the previous dtype stays
assert rm.value.dtype == torch.double
total = rm.compute()
assert total.dtype == torch.double
@pytest.mark.parametrize("input_dtype", [torch.int8, torch.float16, torch.bfloat16])
def test_metric_result_precision_no_lower_than_float32(input_dtype):
"""Test that the ResultMetric only stores values in float32 or higher precision for numerical stability."""
metadata = _Metadata("foo", "bar", reduce_fx="sum")
metadata.sync = _Sync()
metric = _ResultMetric(metadata, is_tensor=True)
assert metric.value.dtype == torch.float
# in bfloat16, truncation would occur at 256 (8 bit exponent)
# in int8, overflow would occur at 128
for i in range(1000):
metric.update(tensor(1.0, dtype=input_dtype), 1)
assert metric.value.dtype == torch.float32
total = metric.compute()
assert total.item() == 1000.0
assert total.dtype == torch.float32
@pytest.mark.parametrize(("reduce_fx", "expected"), [(max, -2), (min, 2)])
def test_result_metric_max_min(reduce_fx, expected):
metadata = _Metadata("foo", "bar", reduce_fx=reduce_fx)
metadata.sync = _Sync()
rm = _ResultMetric(metadata, is_tensor=True)
rm.update(tensor(expected), 1)
assert rm.compute() == expected
def test_compute_not_a_tensor_raises():
class RandomMetric(Metric):
def update(self):
pass
def compute(self):
return tensor(1.0), tensor(2.0)
class MyModel(BoringModel):
def __init__(self):
super().__init__()
self.metric = RandomMetric()
def on_train_start(self):
self.log("foo", self.metric)
model = MyModel()
trainer = Trainer(
limit_train_batches=1,
limit_val_batches=0,
max_epochs=1,
enable_progress_bar=False,
enable_checkpointing=False,
logger=False,
enable_model_summary=False,
)
with pytest.raises(ValueError, match=r"compute\(\)` return of.*foo' must be a tensor"):
trainer.fit(model)
@pytest.mark.parametrize("distributed_env", [True, False])
@pytest.mark.parametrize("log_val", [tensor(0.5), "Accuracy"])
def test_logger_sync_dist(distributed_env, log_val):
if log_val == "Accuracy":
log_val = Accuracy(task="binary") if _TM_GE_0_11 else Accuracy()
pl.trainer.connectors.logger_connector.result.warning_cache.clear()
# self.log('bar', 0.5, ..., sync_dist=False)
meta = _Metadata("foo", "bar")
meta.sync = _Sync(_should=False)
is_tensor = isinstance(log_val, Tensor)
if not is_tensor:
log_val.update(tensor([0, 1]), tensor([0, 0], dtype=torch.long))
result_metric = _ResultMetric(metadata=meta, is_tensor=is_tensor)
result_metric.update(log_val, 10)
warning_ctx = pytest.warns if distributed_env and is_tensor else no_warning_call
patch_ctx = (
mock.patch("torch.distributed.is_initialized", return_value=distributed_env)
if isinstance(log_val, Tensor)
else nullcontext()
)
with (
warning_ctx(PossibleUserWarning, match=r"recommended to use `self.log\('bar', ..., sync_dist=True\)`"),
patch_ctx,
):
value = _ResultCollection._get_cache(result_metric, on_step=False)
assert value == 0.5
| DummyMeanMetric |
python | ray-project__ray | rllib/core/models/tests/test_mlp_encoders.py | {
"start": 284,
"end": 3182
} | class ____(unittest.TestCase):
def test_mlp_encoders(self):
"""Tests building MLP encoders properly and checks for correct architecture."""
# Loop through different combinations of hyperparameters.
inputs_dims_configs = [[1], [50]]
list_of_hidden_layer_dims = [[], [1], [64, 64], [256, 256, 256]]
hidden_layer_activations = [None, "linear", "relu", "tanh", "swish"]
hidden_layer_use_layernorms = [False, True]
output_dims = [1, 48]
output_activations = hidden_layer_activations
hidden_use_biases = [False, True]
output_use_biases = [False, True]
for permutation in itertools.product(
inputs_dims_configs,
list_of_hidden_layer_dims,
hidden_layer_activations,
hidden_layer_use_layernorms,
output_activations,
output_dims,
hidden_use_biases,
output_use_biases,
):
(
inputs_dims,
hidden_layer_dims,
hidden_layer_activation,
hidden_layer_use_layernorm,
output_activation,
output_dim,
hidden_use_bias,
output_use_bias,
) = permutation
print(
f"Testing ...\n"
f"input_dims: {inputs_dims}\n"
f"hidden_layer_dims: {hidden_layer_dims}\n"
f"hidden_layer_activation: {hidden_layer_activation}\n"
f"hidden_layer_use_layernorm: {hidden_layer_use_layernorm}\n"
f"output_activation: {output_activation}\n"
f"output_dim: {output_dim}\n"
f"hidden_use_bias: {hidden_use_bias}\n"
f"output_use_bias: {output_use_bias}\n"
)
config = MLPEncoderConfig(
input_dims=inputs_dims,
hidden_layer_dims=hidden_layer_dims,
hidden_layer_activation=hidden_layer_activation,
hidden_layer_use_layernorm=hidden_layer_use_layernorm,
hidden_layer_use_bias=hidden_use_bias,
output_layer_dim=output_dim,
output_layer_activation=output_activation,
output_layer_use_bias=output_use_bias,
)
# Use a ModelChecker to compare all added models (different frameworks)
# with each other.
model_checker = ModelChecker(config)
# Add this framework version of the model to our checker.
outputs = model_checker.add(framework="torch")
self.assertEqual(outputs[ENCODER_OUT].shape, (1, output_dim))
# Check all added models against each other.
model_checker.check()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestMLPEncoders |
python | networkx__networkx | networkx/algorithms/isomorphism/ismags.py | {
"start": 11513,
"end": 60177
} | class ____:
"""
Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for
"Index-based Subgraph Matching Algorithm with General Symmetries". As the
name implies, it is symmetry aware and will only generate non-symmetric
isomorphisms.
Attributes
----------
graph: networkx.Graph
subgraph: networkx.Graph
Notes
-----
ISMAGS does a symmetry analysis to find the constraints on isomorphisms if
we preclude yielding isomorphisms that differ by a symmetry of the subgraph.
For example, if the subgraph is a 4-cycle, every isomorphism would have a
symmetric version with the nodes rotated relative to the original isomorphism.
By encoding these symmetries as constraints we reduce the search space for
isomorphisms and we also simplify processing the resulting isomorphisms.
**Symmetry Analysis**
The constraints in ISMAGS are based off the handling in ``nauty`` and its many
variants, in particular ``saucy``, as discussed in the ISMAGS paper [1]_.
That paper cites [3]_ for details on symmetry handling. Figure 2 of [3]_
describes the DFS approach to symmetries used here and relying on a data structure
called an Ordered Pair Partitions(OPP). This consists of a pair of partitions
where each part represents nodes with the same degree-by-color over all colors.
We refine these partitions simultaneously in a way to result in permutations
of the nodes that preserve the graph structure. We thus find automorphisms
for the subgraph of interest. From those we identify pairs of nodes which
are structurally equivalent. We then constrain our problem by requiring the
first of the pair to always be assigned first in the isomorphism -- thus
constraining the isomorphisms reported to only one example from the set of all
symmetrically equivalent isomorphisms. These constraints are computed once
based on the subgraph symmetries and then used throughout the DFS search for
isomorphisms.
Finding the symmetries involves a DFS of the OPP wherein we "couple" a node
to a node in its degree-by-color part of the partition. This "coupling" is done
via assigning a new color in the top partition to the node being coupled,
and the same new color in the bottom partition to the node being coupled to.
This new color has only one node in each partition. The new color also requires
that we "refine" both top and bottom partitions by splitting parts until each
part represents a common degree-by-color value. Those refinements introduce
new colors as the parts are split during refinement. Parts do not get combined
during refinement. So the coupling/refining process always results in at least
one new part with only one node in both the top and bottom partition. In practice
we usually refine into many new one-node parts in both partitions.
We continue in this way until each node has its own part/color in the top partition
-- and the node in the bottom partition with that color is the symmetric node.
That is, an OPP represents an automorphism, and thus a symmetry
of the subgraph when each color has a single node in the top partition and a single
node in the bottom partition. From those automorphisms we build up a set of nodes
that can be obtained from each other by symmetry (they are mutually symmetric).
That set of nodes is called an "orbit" of the subgraph under symmetry.
After finding the orbits for one symmetry, we backtrack in the DFS by removing the
latest coupling and replacing it with a coupling from the same top node to a new
bottom node in its degree-by-color grouping. When all possible couplings for that
node are considered we backtrack to the previously coupled node and recouple in
a DFS manner.
We can prune the DFS search tree in helpful ways. The paper [2]_ demonstrates 6
situations of interest in the DFS where pruning is possible:
- An **Automorphism OPP** is an OPP where every part in both partitions
contains a single node. The mapping/automorphism is found by mapping
each top node to the bottom node in the same color part. For example,
``[({1}, {2}, {3}); ({2}, {3}, {1})]`` represents the mapping of each
node to the next in a triangle. It rotates the nodes around the triangle.
- The **Identity OPP** is the first automorphism found during the DFS. It
appears on the left side of the DFS tree and is first due to our ordering of
coupling nodes to be in an arbitrary but fixed ordering of the nodes. This
automorphism does not show any symmetries, but it ensures the orbit for each
node includes itself and it sets us up for handling the symmetries. Note that
a subgraph with no symmetries will only have the identity automorphism.
- A **Non-isomorphic OPP** occurs when refinement creates a different number of
parts in the top partition than in the bottom partition. This means no symmetries
will be found during further processing of that subtree of the DFS. We prune
the subtree and continue.
- A **Matching OPP** is such that each top part that has more than one node is
in fact equal to the bottom part with the same color. The many-node-parts match
exactly. The single-node parts then represent symmetries that do not permute
any matching nodes. Matching OPPs arise while finding the Identity Mapping. But
the single-node parts are identical in the two partitions, so no useful symmetries
are found. But after the Identity Mapping is found, every Matching OPP encountered
will have different nodes in at least two single-node parts of the same color.
So these positions in the DFS provide us with symmetries without any
need to find the whole automorphism. We can prune the subtree, update the orbits
and backtrack. Any larger symmetries that combine these symmetries with symmetries
of the many-node-parts do not need to be explored because the symmetry "generators"
found in this way provide a basis for all symmetries. We will find the symmetry
generators of the many-node-parts at another subtree of the DFS.
- An **Orbit Pruning OPP** is an OPP where the node coupling to be considered next
has both nodes already known to be in the same orbit. We have already identified
those permutations when we discovered the orbit. So we can prune the resulting
subtree. This is the primary pruning discussed in [1]_.
- A **Coset Point** in the DFS is a point of the tree when a node is first
back-tracked. That is, its couplings have all been analyzed once and we backtrack
to its parent. So, said another way, when a node is backtracked to and is about to
be mapped to a different node for the first time, its child in the DFS has been
completely analysed. Thus the orbit for that child at this point in the DFS is
the full orbit for symmetries involving only that child or larger nodes in the
node order. All smaller nodes are mapped to themselves.
This orbit is due to symmetries not involving smaller nodes. Such an orbit is
called the "coset" of that node. The Coset Point does not lead to pruning or to
more symmetries. It is the point in the process where we store the **coset** of
the node being backtracked. We use the cosets to construct the symmetry
constraints.
Once the pruned DFS tree has been traversed, we have collected the cosets of some
special nodes. Often most nodes are not coupled during the progression down the left
side of the DFS. They are separated from other nodes during the partition refinement
process after coupling. So they never get coupled directly. Thus the number of cosets
we find is typically many fewer than the number of nodes.
We turn those cosets into constraints on the nodes when building non-symmetric
isomorphisms. The node whose coset is used is paired with each other node in the
coset. These node-pairs form the constraints. During isomorphism construction we
always select the first of the constraint before the other. This removes subtrees
from the DFS traversal space used to build isomorphisms.
The constraints we obtain via symmetry analysis of the subgraph are used for
finding non-symmetric isomorphisms. We prune the isomorphism tree based on
the constraints we obtain from the symmetry analysis.
**Isomorphism Construction**
Once we have symmetry constraints on the isomorphisms, ISMAGS constructs the allowed
isomorphisms by mapping each node of the subgraph to all possible nodes (with the
same degree-by-color) from the graph. We partition all nodes into degree-by-color
parts and order the subgraph nodes we consider using smallest part size first.
The idea is to try to map the most difficult subgraph nodes first (most difficult
here means least number of graph candidates).
By considering each potential subgraph node to graph candidate mapping image in turn,
we perform a DFS traversal of partial mappings. If the mapping is rejected due to
the graph neighbors not matching the degree-by-color of the subgraph neighbors, or
rejected due to the constraints imposed from symmetry, we prune that subtree and
consider a new graph candidate node for that subgraph node. When no more graph
candidates remain we backtrack to the previous node in the mapping and consider a
new graph candidate for that node. If we ever get to a depth where all subgraph nodes
are mapped and no structural requirements or symmetry constraints are violated,
we have found an isomorphism. We yield that mapping and backtrack to find other
isomorphisms.
As we visit more neighbors, the graph candidate nodes have to satisfy more structural
restrictions. As described in the ISMAGS paper, [1]_, we store each set of structural
restrictions separately as a set of possible candidate nodes rather than computing
the intersection of that set with the known graph candidates for the subgraph node.
We delay taking the intersection until that node is selected to be in the mapping.
While choosing the node with fewest candidates, we avoid computing the intersection
by using the size of the minimal set to be intersected rather than the size of the
intersection. This may make the node ordering slightly worse via a savings of
many intersections most of which are not ever needed.
References
----------
.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
Enumeration", PLoS One 9(5): e97896, 2014.
https://doi.org/10.1371/journal.pone.0097896
.. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph
.. [3] Hadi Katebi, Karem A. Sakallah and Igor L. Markov
"Graph Symmetry Detection and Canonical Labeling: Differences and Synergies"
in "Turing-100. The Alan Turing Centenary" Ed: A. Voronkov p. 181 -- 195, (2012).
https://doi.org/10.29007/gzc1 https://arxiv.org/abs/1208.6271
"""
def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None):
"""
Parameters
----------
graph: networkx.Graph
subgraph: networkx.Graph
node_match: collections.abc.Callable or None
Function used to determine whether two nodes are equivalent. Its
signature should look like ``f(n1: dict, n2: dict) -> bool``, with
`n1` and `n2` node property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_node_match` and
friends.
If `None`, all nodes are considered equal.
edge_match: collections.abc.Callable or None
Function used to determine whether two edges are equivalent. Its
signature should look like ``f(e1: dict, e2: dict) -> bool``, with
`e1` and `e2` edge property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_edge_match` and
friends.
If `None`, all edges are considered equal.
cache: collections.abc.Mapping
A cache used for caching graph symmetries.
"""
if graph.is_directed() != subgraph.is_directed():
raise ValueError("Directed and undirected graphs cannot be compared.")
# TODO: allow for precomputed partitions and colors
self.graph = graph
self.subgraph = subgraph
self._symmetry_cache = cache
# Naming conventions are taken from the original paper.
# For your sanity:
# sg: subgraph
# g: graph
# e: edge(s)
# n: node(s)
# So: sgn means "subgraph nodes".
node_parts = self.create_aligned_partitions(
node_match, self.subgraph.nodes, self.graph.nodes
)
self._sgn_partition, self._gn_partition, self.N_node_colors = node_parts
self._sgn_colors = node_to_part_ID_dict(self._sgn_partition)
self._gn_colors = node_to_part_ID_dict(self._gn_partition)
edge_partitions = self.create_aligned_partitions(
edge_match, self.subgraph.edges(), self.graph.edges()
)
self._sge_partition, self._ge_partition, self.N_edge_colors = edge_partitions
if self.graph.is_directed():
self._sge_colors = node_to_part_ID_dict(self._sge_partition)
self._ge_colors = node_to_part_ID_dict(self._ge_partition)
else: # allow lookups (u, v) or (v, u)
self._sge_colors = EdgeLookup(node_to_part_ID_dict(self._sge_partition))
self._ge_colors = EdgeLookup(node_to_part_ID_dict(self._ge_partition))
def create_aligned_partitions(self, thing_matcher, sg_things, g_things):
"""Partitions of "things" (nodes or edges) from subgraph and graph
based on function `thing_matcher`.
Returns: sg_partition, g_partition, number_of_matched_parts
The first `number_of_matched_parts` parts in each partition
match in order, e.g. 2nd part matches other's 2nd part.
Warning: nodes in parts after that have no matching nodes in the other graph.
For morphisms those nodes can't appear in the mapping.
"""
if thing_matcher is None:
sg_partition = [set(sg_things)]
g_partition = [set(g_things)]
return sg_partition, g_partition, 1
# Use thing_matcher to create a partition
# Note: isinstance(G.edges(), OutEdgeDataView) is only true for multi(di)graph
sg_multiedge = isinstance(sg_things, nx.classes.reportviews.OutEdgeDataView)
g_multiedge = isinstance(g_things, nx.classes.reportviews.OutEdgeDataView)
if not sg_multiedge:
def sg_match(thing1, thing2):
return thing_matcher(sg_things[thing1], sg_things[thing2])
else: # multiedges (note nodes of multigraphs use simple case above)
def sg_match(thing1, thing2):
(u1, v1), (u2, v2) = thing1, thing2
return thing_matcher(self.subgraph[u1][v1], self.subgraph[u2][v2])
if not g_multiedge:
def g_match(thing1, thing2):
return thing_matcher(g_things[thing1], g_things[thing2])
else: # multiedges (note nodes of multigraphs use simple case above)
def g_match(thing1, thing2):
(u1, v1), (u2, v2) = thing1, thing2
return thing_matcher(self.graph[u1][v1], self.graph[u2][v2])
sg_partition = make_partition(sg_things, sg_match)
g_partition = make_partition(g_things, g_match)
# Align order of g_partition to that of sg_partition
sgc_to_gc = {}
gc_to_sgc = {}
sN, N = len(sg_partition), len(g_partition)
for sgc, gc in itertools.product(range(sN), range(N)):
sgt = next(iter(sg_partition[sgc]))
gt = next(iter(g_partition[gc]))
sgt_ = sg_things[sgt] if not sg_multiedge else self.subgraph[sgt[0]][sgt[1]]
gt_ = g_things[gt] if not g_multiedge else self.graph[gt[0]][gt[1]]
if thing_matcher(sgt_, gt_):
# TODO: remove these two if-checks when confident they never arise
# The `check` feature in match_partitions should ensure they do not
if sgc in sgc_to_gc:
raise nx.NetworkXError(
f"\nMatching function {thing_matcher} seems faulty.\n"
f"Partition found: {sg_partition=}\n"
f"So {sgt} in subgraph part {sg_partition[sgc]} matches two "
f"graph parts {g_partition[gc]} and "
f"{g_partition[sgc_to_gc[sgc]]}\n"
)
if gc in gc_to_sgc:
raise nx.NetworkXError(
f"\nMatching function seems broken: {thing_matcher}\n"
f"Partitions found: {g_partition=} {sg_partition=}\n"
f"So {gt} in graph part {g_partition[gc]} matches two "
f"subgraph parts {sg_partition[sgc]} and "
f"{sg_partition[gc_to_sgc[gc]]}\n"
)
sgc_to_gc[sgc] = gc
gc_to_sgc[gc] = sgc
## return two lists and the number of partitions that match.
new_order = [
(sg_partition[sgc], g_partition[gc]) for sgc, gc in sgc_to_gc.items()
]
Ncolors = len(new_order)
if Ncolors:
new_sg_p, new_g_p = [list(x) for x in zip(*new_order)]
else:
new_sg_p, new_g_p = [], []
if Ncolors < sN:
extra = [sg_partition[c] for c in range(sN) if c not in sgc_to_gc]
new_sg_p = list(new_sg_p) + extra
new_g_p = list(new_g_p) + [set()] * len(extra)
if Ncolors < N:
extra = [g_partition[c] for c in range(N) if c not in gc_to_sgc]
new_g_p = list(new_g_p) + extra
new_sg_p = list(new_sg_p) + [set()] * len(extra)
return new_sg_p, new_g_p, Ncolors
def find_isomorphisms(self, symmetry=True):
"""Find all subgraph isomorphisms between subgraph and graph
Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`.
Parameters
----------
symmetry: bool
Whether symmetry should be taken into account. If False, found
isomorphisms may be symmetrically equivalent.
Yields
------
dict
The found isomorphism mappings of {graph_node: subgraph_node}.
"""
# The networkx VF2 algorithm is slightly funny in when it yields an
# empty dict and when not.
if not self.subgraph:
yield {}
return
elif not self.graph:
return
elif len(self.graph) < len(self.subgraph):
return
elif len(self._sgn_partition) > self.N_node_colors:
# some subgraph nodes have a color that doesn't occur in graph
return
elif len(self._sge_partition) > self.N_edge_colors:
# some subgraph edges have a color that doesn't occur in graph
return
if symmetry:
cosets = self.analyze_subgraph_symmetry()
# Turn cosets into constraints.
constraints = [(n, co) for n, cs in cosets.items() for co in cs if n != co]
else:
constraints = []
cand_sets = self._get_node_color_candidate_sets()
lookahead_candidates = self._get_color_degree_candidates()
for sgn, lookahead_cands in lookahead_candidates.items():
cand_sets[sgn].add(frozenset(lookahead_cands))
if any(cand_sets.values()):
# Choose start node based on a heuristic for the min # of candidates
# Heuristic here is length of smallest frozenset in candidates' set
# of frozensets for that node. Using the smallest length avoids
# computing the intersection of the frozensets for each node.
start_sgn = min(cand_sets, key=lambda n: min(len(x) for x in cand_sets[n]))
cand_sets[start_sgn] = (frozenset.intersection(*cand_sets[start_sgn]),)
yield from self._map_nodes(start_sgn, cand_sets, constraints)
return
def _get_color_degree_candidates(self):
"""
Returns a mapping of {subgraph node: set of graph nodes} for
which the graph nodes are feasible mapping candidate_sets for the
subgraph node, as determined by looking ahead one edge.
"""
g_deg = color_degree_by_node(self.graph, self._gn_colors, self._ge_colors)
sg_deg = color_degree_by_node(self.subgraph, self._sgn_colors, self._sge_colors)
return {
sgn: {
gn
for gn, (_, *g_counts) in g_deg.items()
if all(
sg_cnt <= g_counts[idx][color]
for idx, counts in enumerate(needed_counts)
for color, sg_cnt in counts.items()
)
}
for sgn, (_, *needed_counts) in sg_deg.items()
}
def largest_common_subgraph(self, symmetry=True):
    """
    Find the largest common induced subgraphs between :attr:`subgraph` and
    :attr:`graph`.

    Parameters
    ----------
    symmetry: bool
        Whether symmetry should be taken into account. If False, found
        largest common subgraphs may be symmetrically equivalent.

    Yields
    ------
    dict
        The found isomorphism mappings of {graph_node: subgraph_node}.
    """
    # The networkx VF2 algorithm is slightly funny in when it yields an
    # empty dict and when not.
    if not self.subgraph:
        yield {}
        return
    elif not self.graph:
        return
    if symmetry:
        cosets = self.analyze_subgraph_symmetry()
        # Turn cosets into constraints.
        constraints = [(n, cn) for n, cs in cosets.items() for cn in cs if n != cn]
    else:
        constraints = []
    candidate_sets = self._get_node_color_candidate_sets()
    if any(candidate_sets.values()):
        # Only subgraph nodes whose color also occurs in graph can ever be
        # mapped: those live in the first N_node_colors partition cells.
        relevant_parts = self._sgn_partition[: self.N_node_colors]
        to_be_mapped = {frozenset(n for p in relevant_parts for n in p)}
        yield from self._largest_common_subgraph(
            candidate_sets, constraints, to_be_mapped
        )
    else:
        return
def analyze_subgraph_symmetry(self):
    """
    Find a minimal set of permutations and corresponding co-sets that
    describe the symmetry of ``self.subgraph``, given the node and edge
    equalities given by ``self._sgn_partition`` and ``self._sge_colors``,
    respectively.

    Returns
    -------
    dict[collections.abc.Hashable, set[collections.abc.Hashable]]
        The found co-sets. The co-sets is a dictionary of
        ``{node key: set of node keys}``.
        Every key-value pair describes which ``values`` can be interchanged
        without changing nodes less than ``key``.
    """
    partition, edge_colors = self._sgn_partition, self._sge_colors
    key = None  # cache key; only computed when a cache is in use
    if self._symmetry_cache is not None:
        key = hash(
            (
                tuple(self.subgraph.nodes),
                tuple(self.subgraph.edges),
                # BUG FIX: this used to read `node_partition`, an undefined
                # name, raising NameError whenever the cache was enabled.
                tuple(map(tuple, partition)),
                tuple(edge_colors.items()),
                self.subgraph.is_directed(),
            )
        )
        if key in self._symmetry_cache:
            return self._symmetry_cache[key]
    partition = self._refine_node_partition(self.subgraph, partition, edge_colors)
    cosets = self._process_ordered_pair_partitions(
        self.subgraph, partition, partition, edge_colors
    )
    if self._symmetry_cache is not None:
        self._symmetry_cache[key] = cosets
    return cosets
def is_isomorphic(self, symmetry=False):
    """
    Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and
    False otherwise.

    Returns
    -------
    bool
    """
    # Graphs of different order can never be isomorphic; checking sizes
    # first lets the (potentially expensive) subgraph search short-circuit.
    if len(self.subgraph) != len(self.graph):
        return False
    return self.subgraph_is_isomorphic(symmetry)
def subgraph_is_isomorphic(self, symmetry=False):
    """
    Returns True if a subgraph of :attr:`graph` is isomorphic to
    :attr:`subgraph` and False otherwise.

    Returns
    -------
    bool
    """
    # One witness suffices, so probe the lazy iterator exactly once.
    # symmetry defaults to False: enumerating all symmetry elements up
    # front probably costs more time than it gains here.
    for _ in self.subgraph_isomorphisms_iter(symmetry=symmetry):
        return True
    return False
def isomorphisms_iter(self, symmetry=True):
    """
    Does the same as :meth:`find_isomorphisms` if :attr:`graph` and
    :attr:`subgraph` have the same number of nodes.

    Yields nothing when the orders differ: a full isomorphism is only
    possible between graphs with the same number of nodes.
    """
    if len(self.graph) == len(self.subgraph):
        yield from self.subgraph_isomorphisms_iter(symmetry=symmetry)
def subgraph_isomorphisms_iter(self, symmetry=True):
    """Alternative name for :meth:`find_isomorphisms`."""
    # Thin public alias; all logic lives in find_isomorphisms.
    return self.find_isomorphisms(symmetry)
def _get_node_color_candidate_sets(self):
    """
    For every subgraph node, collect the graph nodes of the same color.

    Returns a plain dict mapping each subgraph node to a *set of
    frozensets* of graph nodes. Each frozenset is a restriction: the node
    may only be mapped to members of it, i.e. to the intersection of all
    frozensets in the collection. Restrictions are kept unintersected to
    delay (and often avoid) computing intersections, and because a set of
    frozensets collapses duplicate restrictions for free.

    A node whose color does not occur in the graph at all gets an empty
    collection, i.e. no viable candidates.
    """
    candidate_sets = {}
    for sgn in self.subgraph.nodes:
        sgn_color = self._sgn_colors[sgn]
        if sgn_color < self.N_node_colors:
            restriction = frozenset(self._gn_partition[sgn_color])
            candidate_sets[sgn] = {restriction}
        else:
            # Color absent from graph: no candidates whatsoever.
            candidate_sets[sgn] = set()
    return candidate_sets
@classmethod
def _refine_node_partition(cls, graph, partition, edge_colors):
    """
    Repeatedly split the cells of ``partition`` until all nodes sharing a
    cell also have identical color degrees (as computed by
    ``color_degree_by_node``). Returns the refined partition.
    """
    def equal_color(node1, node2):
        # Two nodes stay in the same cell iff their color-degree
        # profiles match.
        return color_degree[node1] == color_degree[node2]

    node_colors = node_to_part_ID_dict(partition)
    color_degree = color_degree_by_node(graph, node_colors, edge_colors)
    # Keep refining: each split changes node colors, which changes color
    # degrees, which may in turn force further splits.
    while not all(are_all_equal(color_degree[n] for n in p) for p in partition):
        partition = [
            p
            for part in partition
            for p in (
                [part]
                if are_all_equal(color_degree[n] for n in part)
                else sorted(make_partition(part, equal_color, check=False), key=len)
            )
        ]
        node_colors = node_to_part_ID_dict(partition)
        color_degree = color_degree_by_node(graph, node_colors, edge_colors)
    return partition
def _map_nodes(self, sgn, candidate_sets, constraints, to_be_mapped=None):
    """
    Find all subgraph isomorphisms honoring constraints.

    Parameters
    ----------
    sgn :
        The subgraph node the search branches on first.
    candidate_sets : dict
        Mapping of subgraph node -> set of frozensets of graph nodes; see
        the note below. Mutated for ``sgn`` (its restrictions are
        collapsed to their intersection).
    constraints : list
        Symmetry-breaking pairs ``(sgn_a, sgn_b)``: the graph node mapped
        to ``sgn_b`` must have a higher ID than the one mapped to
        ``sgn_a``.
    to_be_mapped : optional
        The subgraph nodes that must all be mapped; defaults to every
        subgraph node.

    Yields
    ------
    dict
        A mapping of {graph_node: subgraph_node}.

    Notes
    -----
    The collection `candidate_sets` is stored as a dict-of-set-of-frozenset.
    The dict is keyed by node to a collection of candidate frozensets. Any
    viable candidate must belong to all the frozensets in the collection.
    So each frozenset added to the collection is a restriction on the candidates.
    According to the paper, we store the collection of sets rather than their
    intersection to delay computing many intersections with the hope of avoiding
    them completely. Having the middle collection be a set also means that
    duplicate restrictions on candidates are ignored, avoiding another intersection.
    """
    # shortcuts for speed
    subgraph = self.subgraph
    subgraph_adj = subgraph._adj
    graph = self.graph
    graph_adj = graph._adj
    self_ge_partition = self._ge_partition
    self_sge_colors = self._sge_colors
    is_directed = subgraph.is_directed()
    gn_ID_to_node = list(graph)
    gn_node_to_ID = {n: id for id, n in enumerate(graph)}
    mapping = {}
    rev_mapping = {}
    if to_be_mapped is None:
        to_be_mapped = subgraph_adj.keys()
    # Note that we don't copy candidates here. This means we leak
    # information between the branches of the search. This is intentional!
    # Specifically, we modify candidates here. That's OK because we substitute
    # the set of frozensets with a set containing the frozenset intersection.
    # So, it doesn't change the membership rule or the length rule for sorting.
    # Membership: any candidate must be an element of each of the frozensets.
    # Length: length of the intersection set. Use heuristic min(len of frozensets).
    # This intersection improves future length heuristics which can only occur
    # after this element of the queu is popped. But it means future additional
    # restriction frozensets that duplicate previous ones are not ignored.
    sgn_candidates = frozenset.intersection(*candidate_sets[sgn])
    candidate_sets[sgn] = {sgn_candidates}
    # Each stack entry: (subgraph node, candidate dict snapshot, iterator
    # over the graph nodes still to be tried for that subgraph node).
    queue = [(sgn, candidate_sets, iter(sgn_candidates))]
    while queue:  # DFS over all possible mappings
        sgn, candidate_sets, sgn_cand_iter = queue[-1]
        for gn in sgn_cand_iter:
            # We're going to try to map sgn to gn.
            if gn in rev_mapping:
                continue  # pragma: no cover
            # REDUCTION and COMBINATION
            if sgn in mapping:
                old_gn = mapping[sgn]
                del rev_mapping[old_gn]
            mapping[sgn] = gn
            rev_mapping[gn] = sgn
            # BASECASE
            if len(mapping) == len(to_be_mapped):
                yield rev_mapping.copy()
                del mapping[sgn]
                del rev_mapping[gn]
                continue
            left_to_map = to_be_mapped - mapping.keys()
            # We copy the candidates dict. But it is not a deepcopy.
            # This avoids inner set copies, yet still allows updates b/c setitem
            # changes sgn in new dict without changing original set.
            # Below be careful to not change the sets of frozensets.
            cand_sets = candidate_sets.copy()
            # update the candidate_sets for unmapped sgn based on sgn mapped
            if not is_directed:
                sgn_nbrs = subgraph_adj[sgn]
                not_gn_nbrs = graph_adj.keys() - graph_adj[gn].keys()
                for sgn2 in left_to_map:
                    # edge color must match when sgn2 connected to sgn
                    if sgn2 not in sgn_nbrs:
                        gn2_cands = not_gn_nbrs
                    else:
                        g_edges = self_ge_partition[self_sge_colors[sgn, sgn2]]
                        gn2_cands = {n for e in g_edges if gn in e for n in e}
                    # Node color compatibility should be taken care of by the
                    # initial candidate lists made by find_subgraphs
                    # Add gn2_cands to the right collection.
                    # Do not change the original set. So do not use |= operator
                    cand_sets[sgn2] = cand_sets[sgn2] | {frozenset(gn2_cands)}
            else:  # directed
                sgn_nbrs = subgraph_adj[sgn].keys()
                sgn_preds = subgraph._pred[sgn].keys()
                not_gn_nbrs = (
                    graph_adj.keys() - graph_adj[gn].keys() - graph._pred[gn].keys()
                )
                for sgn2 in left_to_map:
                    # edge color must match when sgn2 connected to sgn
                    if sgn2 not in sgn_nbrs:
                        if sgn2 not in sgn_preds:
                            gn2_cands = not_gn_nbrs
                        else:  # sgn2 in sgn_preds
                            g_edges = self_ge_partition[self_sge_colors[sgn2, sgn]]
                            gn2_cands = {e[0] for e in g_edges if gn == e[1]}
                    else:
                        if sgn2 not in sgn_preds:
                            g_edges = self_ge_partition[self_sge_colors[sgn, sgn2]]
                            gn2_cands = {e[1] for e in g_edges if gn == e[0]}
                        else:
                            # gn2 must have correct color in both directions
                            g_edges = self_ge_partition[self_sge_colors[sgn, sgn2]]
                            gn2_cands = {e[1] for e in g_edges if gn == e[0]}
                            g_edges = self_ge_partition[self_sge_colors[sgn2, sgn]]
                            gn2_cands &= {e[0] for e in g_edges if gn == e[1]}
                    # Do not change the original set. So do not use |= operator
                    cand_sets[sgn2] = cand_sets[sgn2] | {frozenset(gn2_cands)}
            for sgn2 in left_to_map:
                # symmetry must match. constraints mean gn2>gn iff sgn2>sgn
                if (sgn, sgn2) in constraints:
                    gn2_cands = set(gn_ID_to_node[gn_node_to_ID[gn] + 1 :])
                elif (sgn2, sgn) in constraints:
                    gn2_cands = set(gn_ID_to_node[: gn_node_to_ID[gn]])
                else:
                    continue  # pragma: no cover
                # Do not change the original set. So do not use |= operator
                cand_sets[sgn2] = cand_sets[sgn2] | {frozenset(gn2_cands)}
            # The next node is the one that is unmapped and has fewest candidates
            # Use the heuristic of the min size of the frozensets rather than
            # intersection of all frozensets to delay computing intersections.
            new_sgn = min(
                left_to_map, key=lambda n: min(len(x) for x in cand_sets[n])
            )
            new_sgn_candidates = frozenset.intersection(*cand_sets[new_sgn])
            if not new_sgn_candidates:
                continue
            cand_sets[new_sgn] = {new_sgn_candidates}
            queue.append((new_sgn, cand_sets, iter(new_sgn_candidates)))
            break
        else:  # all gn candidates tried for sgn.
            queue.pop()
            if sgn in mapping:
                del rev_mapping[mapping[sgn]]
                del mapping[sgn]
def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None):
    """
    Find all largest common subgraphs honoring constraints.

    Parameters
    ----------
    candidates : dict
        Subgraph node -> set of frozensets of graph-node candidates.
    constraints : list
        Symmetry-breaking ``(low, high)`` node pairs.
    to_be_mapped : set of frozensets, optional
        The candidate subgraph node sets still worth trying at the
        current size; defaults to all subgraph nodes.

    Yields
    ------
    dict
        Isomorphism mappings of {graph_node: subgraph_node}.
    """
    # to_be_mapped is a set of frozensets of subgraph nodes
    if to_be_mapped is None:
        to_be_mapped = {frozenset(self.subgraph.nodes)}
    # The LCS problem is basically a repeated subgraph isomorphism problem
    # with smaller and smaller subgraphs. We store the nodes that are
    # "part of" the subgraph in to_be_mapped, and we make it a little
    # smaller every iteration.
    current_size = len(next(iter(to_be_mapped), []))
    found_iso = False
    if current_size <= len(self.graph):
        # There's no point in trying to find isomorphisms of
        # graph >= subgraph if subgraph has more nodes than graph.
        # Try the isomorphism first with the nodes with lowest ID. So sort
        # them. Those are more likely to be part of the final correspondence.
        # In theory, this makes finding the first answer(s) faster.
        for nodes in sorted(to_be_mapped, key=sorted):
            # Find the isomorphism between subgraph[to_be_mapped] <= graph
            next_sgn = min(nodes, key=lambda n: min(len(x) for x in candidates[n]))
            isomorphs = self._map_nodes(
                next_sgn, candidates, constraints, to_be_mapped=nodes
            )
            # This is effectively `yield from isomorphs`, except that we look
            # whether an item was yielded.
            try:
                item = next(isomorphs)
            except StopIteration:
                pass
            else:
                yield item
                yield from isomorphs
                found_iso = True
    # BASECASE
    if found_iso or current_size == 1:
        # Shrinking has no point because either 1) we end up with a smaller
        # common subgraph (and we want the largest), or 2) there'll be no
        # more subgraph.
        return
    left_to_be_mapped = set()
    for nodes in to_be_mapped:
        for sgn in nodes:
            # We're going to remove sgn from to_be_mapped, but subject to
            # symmetry constraints. We know that for every constraint we
            # have those subgraph nodes are equal. So whenever we would
            # remove the lower part of a constraint, remove the higher
            # instead. This is all dealth with by _remove_node. And because
            # left_to_be_mapped is a set, we don't do double work.
            # And finally, make the subgraph one node smaller.
            # REDUCTION
            new_nodes = self._remove_node(sgn, nodes, constraints)
            left_to_be_mapped.add(new_nodes)
    # COMBINATION
    yield from self._largest_common_subgraph(
        candidates, constraints, to_be_mapped=left_to_be_mapped
    )
@staticmethod
def _remove_node(node, nodes, constraints):
"""
Returns a new set where node has been removed from nodes, subject to
symmetry constraints. We know, that for every constraint we have
those subgraph nodes are equal. So whenever we would remove the
lower part of a constraint, remove the higher instead.
"""
while True:
for low, high in constraints:
if low == node and high in nodes:
node = high
break
else: # no break, couldn't find node in constraints
return frozenset(nodes - {node})
@staticmethod
def _get_permutations_by_length(items):
"""
Get all permutations of items, but only permute items with the same
length.
>>> found = list(ISMAGS._get_permutations_by_length([{1}, {2}, {3, 4}, {4, 5}]))
>>> answer = [
... (({1}, {2}), ({3, 4}, {4, 5})),
... (({1}, {2}), ({4, 5}, {3, 4})),
... (({2}, {1}), ({3, 4}, {4, 5})),
... (({2}, {1}), ({4, 5}, {3, 4})),
... ]
>>> found == answer
True
"""
by_len = defaultdict(list)
for item in items:
by_len[len(item)].append(item)
return list(
itertools.product(
*(itertools.permutations(by_len[l]) for l in sorted(by_len))
)
)
@classmethod
def _refine_opp(cls, graph, top, bottom, edge_colors):
    """
    Refine an ordered pair partition (OPP) and yield every consistent
    ``(top, bottom)`` pair whose bottom cells are color-degree
    homogeneous. Bottoms whose refinement no longer matches ``top`` in
    length (non-isomorphic OPPs) are pruned here.

    FIX: decorated as a @classmethod — the first parameter was already
    named ``cls`` and only class-level helpers are used, matching the
    sibling ``_refine_node_partition``. The existing call through an
    instance keeps working identically.
    """
    def equal_color(node1, node2):
        return color_degree[node1] == color_degree[node2]

    top = cls._refine_node_partition(graph, top, edge_colors)
    possible_bottoms = [bottom]
    while possible_bottoms:
        bottom = possible_bottoms.pop()
        node_colors = node_to_part_ID_dict(bottom)
        color_degree = color_degree_by_node(graph, node_colors, edge_colors)
        if all(are_all_equal(color_degree[n] for n in p) for p in bottom):
            if len(top) == len(bottom):
                yield top, bottom
            # else Non-isomorphic OPP (pruned here)
            # either way continue to next possible bottom
            continue
        # refine bottom partition
        more_bottoms = [[]]
        for part in bottom:
            if len(part) == 1 or are_all_equal(color_degree[node] for node in part):
                for new_bottom in more_bottoms:
                    new_bottom.append(part)
            else:
                # This part needs to be refined
                refined_part = make_partition(part, equal_color, check=False)
                R = len(refined_part)
                if R == 1 or R == len({len(p) for p in refined_part}):
                    # no two parts have same length -- simple case
                    for n_p in more_bottoms:
                        n_p.extend(sorted(refined_part, key=len))
                else:
                    # Any part might match any other part with the same size.
                    # Before refinement they were the same color. So we need to
                    # include all possible orderings/colors within each size.
                    permutations = cls._get_permutations_by_length(refined_part)
                    # Add all permutations of the refined parts to each possible
                    # bottom. So the number of new possible bottoms is multiplied
                    # by the number of permutations of the refined parts.
                    new_partitions = []
                    for new_partition in more_bottoms:
                        for p in permutations:
                            # p is tuple-of-tuples-of-sets. Flatten to list-of-sets
                            flat_p = [s for tup in p for s in tup]
                            new_partitions.append(new_partition + flat_p)
                    more_bottoms = new_partitions
        # reverse more_bottoms to keep the "finding identity" bottom first
        possible_bottoms.extend(more_bottoms[::-1])
@staticmethod
def _find_permutations(top_partition, bottom_partition):
"""
Return a set of 2-tuples of nodes. These nodes are not equal
but are mapped to each other in the symmetry represented by this OPP.
Swapping all the 2-tuples of nodes in this set permutes the nodes
but retains the graph structure. Thus it is a symmetry of the subgraph.
"""
# Find permutations
permutations = set()
for top, bot in zip(top_partition, bottom_partition):
if len(top) > 1 or len(bot) > 1:
# ignore parts with > 1 element when they are equal
# These are called Matching OPPs in Katebi 2012.
# Symmetries in matching partitions are built by considering
# only parts that have 1 element.
if top == bot:
continue
raise IndexError(
"Not all nodes are matched. This is"
f" impossible: {top_partition}, {bottom_partition}"
)
# top and bot have only one element
elif top != bot:
permutations.add(frozenset((next(iter(top)), next(iter(bot)))))
return permutations
def _process_ordered_pair_partitions(
    self,
    graph,
    top_partition,
    bottom_partition,
    edge_colors,
):
    """
    Depth-first exploration of ordered pair partitions (OPPs) to find the
    symmetries of ``graph``.

    Nodes are repeatedly coupled (a top node pinned to a bottom node) and
    each refined OPP either reveals the identity mapping, a symmetry
    (used to merge orbits), or is queued for deeper exploration.

    Returns
    -------
    dict
        The co-sets: for every node that completed its DFS subtree, a
        copy of its orbit at that moment.
    """
    if all(len(top) <= 1 for top in top_partition):
        # no symmetries. Each node unique.
        return {}
    # first mapping found is the identity mapping
    finding_identity = True
    # Union-find-like orbit bookkeeping: orbit_id maps node -> index into
    # orbits; merged orbits leave an empty set behind.
    orbit_id = {node: orbit_i for orbit_i, node in enumerate(graph)}
    orbits = [{node} for node in graph]
    cosets = {}
    node_to_ID = {n: i for i, n in enumerate(graph)}
    sort_by_ID = node_to_ID.__getitem__

    def _load_next_queue_entry(queue, top_partition, bottom_partition):
        # find smallest node (by ID) in a |part|>1 and its partition index
        unmapped_nodes = (
            (node_to_ID[node], node, idx)
            for idx, t_part in enumerate(top_partition)
            for node in t_part
            if len(t_part) > 1
        )
        _, node, part_i = min(unmapped_nodes)
        b_part = bottom_partition[part_i]
        node2_iter = iter(sorted(b_part, key=sort_by_ID))
        queue.append([top_partition, bottom_partition, node, part_i, node2_iter])

    queue = []
    _load_next_queue_entry(queue, top_partition, bottom_partition)
    while queue:
        tops, bottoms, node, part_i, node2_iter = queue[-1]
        for node2 in node2_iter:
            if node != node2 and orbit_id[node] == orbit_id[node2]:
                # Orbit prune
                continue
            # couple node to node2
            new_top_part = {node}
            new_bot_part = {node2}
            new_top = [top.copy() for top in tops]
            new_top[part_i] -= new_top_part
            new_top.insert(part_i, new_top_part)
            new_bot = [bot.copy() for bot in bottoms]
            new_bot[part_i] -= new_bot_part
            new_bot.insert(part_i, new_bot_part)
            # collect OPPs
            opps = self._refine_opp(graph, new_top, new_bot, edge_colors)
            new_q = []
            for opp in opps:
                # Use OPP to find any of: Identity, Automorphism or Matching OPPs
                # else load the OPP onto queue for further exploration
                # Note that we check for Orbit pruning later because orbits may
                # be updated while OPP is sitting on the queue.
                # Note that we check for Non-isomorphic OPPs in `_refine_opp`.
                if finding_identity:
                    # Note: allow zero size parts in identity check
                    # b/c largest_common_subgraph allows empty parts
                    if all(len(top) <= 1 for top in opp[0]):
                        # Identity found. Set flag. Can now prune Matching OPPs
                        finding_identity = False
                        continue
                elif all(len(t) <= 1 or t == b for t, b in zip(*opp)):
                    # Found a symmetry! (Full mapping or Matching OPP)
                    # update orbits using the permutations from the OPP.
                    permutations = self._find_permutations(*opp)
                    for n1, n2 in permutations:
                        orb1 = orbit_id[n1]
                        orb2 = orbit_id[n2]
                        if orb1 != orb2:
                            orbit_set2 = orbits[orb2]
                            orbits[orb1].update(orbit_set2)
                            orbits[orb2] = set()
                            orbit_id.update((n, orb1) for n in orbit_set2)
                    continue
                _load_next_queue_entry(new_q, *opp)
            # reverse order to maintain node order DFS (Identity comes first)
            queue.extend(new_q[::-1])
            break
        else:  # no more node2 options
            queue.pop()
            if node not in cosets:
                # coset of `node` is its orbit at the time `node` has completed
                # its first DFS traversal. DFS is about to go to previous node.
                # Make copy so future orbit changes do not change the coset.
                cosets[node] = orbits[orbit_id[node]].copy()
    return cosets
| ISMAGS |
python | cherrypy__cherrypy | cherrypy/lib/gctools.py | {
"start": 270,
"end": 3822
} | class ____(object):
"""An object which gathers all referrers of an object to a given depth."""
peek_length = 40
def __init__(self, ignore=None, maxdepth=2, maxparents=10):
    """Initialize a referrer tree structure.

    ignore: optional list of objects to exclude from the tree; the
        caller's own frame is always appended to it.
    maxdepth: how many referrer levels :meth:`ascend` may descend.
    maxparents: per-object cap; beyond this only a count is reported.
    """
    self.ignore = ignore or []
    # Exclude the calling frame, which necessarily refers to the object
    # being inspected.
    self.ignore.append(inspect.currentframe().f_back)
    self.maxdepth = maxdepth
    self.maxparents = maxparents
def ascend(self, obj, depth=1):
    """Return a nested list containing referrers of the given object.

    Each entry is a ``(referrer, sub-tree)`` pair; recursion stops at
    ``self.maxdepth``, and an object with more than ``self.maxparents``
    referrers is summarized as a count instead.
    """
    depth += 1
    parents = []
    # Gather all referrers in one step to minimize
    # cascading references due to repr() logic.
    refs = gc.get_referrers(obj)
    # The refs list itself now refers to obj; hide it from deeper levels.
    self.ignore.append(refs)
    if len(refs) > self.maxparents:
        return [('[%s referrers]' % len(refs), [])]
    try:
        ascendcode = self.ascend.__code__
    except AttributeError:
        # Legacy (Python 2) attribute spelling.
        ascendcode = self.ascend.im_func.func_code
    for parent in refs:
        # Skip our own recursive frames and anything explicitly ignored.
        if inspect.isframe(parent) and parent.f_code is ascendcode:
            continue
        if parent in self.ignore:
            continue
        if depth <= self.maxdepth:
            parents.append((parent, self.ascend(parent, depth)))
        else:
            parents.append((parent, []))
    return parents
def peek(self, s):
    """Return *s*, shortened to roughly ``peek_length`` characters.

    Long strings keep their head and tail with an ellipsis in between;
    short strings come back unchanged.
    """
    limit = self.peek_length + 3
    if len(s) <= limit:
        return s
    half = self.peek_length // 2
    return s[:half] + '...' + s[-half:]
def _format(self, obj, descend=True):
    """Return a string representation of a single object.

    When *descend* is true, dicts/lists/tuples are rendered one level
    deep (their items formatted with ``descend=False``); otherwise only
    a truncated ``repr`` via :meth:`peek` is returned.
    """
    if inspect.isframe(obj):
        filename, lineno, func, context, index = inspect.getframeinfo(obj)
        return "<frame of function '%s'>" % func
    if not descend:
        return self.peek(repr(obj))
    if isinstance(obj, dict):
        return (
            '{'
            + ', '.join(
                [
                    '%s: %s'
                    % (
                        self._format(k, descend=False),
                        self._format(v, descend=False),
                    )
                    for k, v in obj.items()
                ],
            )
            + '}'
        )
    elif isinstance(obj, list):
        return (
            '['
            + ', '.join(
                [self._format(item, descend=False) for item in obj],
            )
            + ']'
        )
    elif isinstance(obj, tuple):
        return (
            '('
            + ', '.join(
                [self._format(item, descend=False) for item in obj],
            )
            + ')'
        )
    # Plain scalars keep their bare repr; everything else is prefixed
    # with its type for context.
    r = self.peek(repr(obj))
    if isinstance(obj, (str, int, float)):
        return r
    return '%s: %s' % (type(obj), r)
def format(self, tree):
    """Return a list of string reprs from a nested list of referrers."""
    output = []

    def ascend(branch, depth=1):
        # Depth-first walk; indentation width encodes referrer depth.
        for parent, grandparents in branch:
            output.append((' ' * depth) + self._format(parent))
            if grandparents:
                ascend(grandparents, depth + 1)

    ascend(tree)
    return output
def get_instances(cls):
    """Return every live, garbage-collector-tracked instance of *cls*."""
    matches = (candidate for candidate in gc.get_objects() if isinstance(candidate, cls))
    return list(matches)
| ReferrerTree |
python | apache__airflow | providers/google/tests/unit/google/cloud/utils/test_mlengine_prediction_summary.py | {
"start": 1175,
"end": 1421
} | class ____:
def test_encode(self):
    """encode() must serialize a dict to compact JSON bytes."""
    assert mlengine_prediction_summary.JsonCoder.encode({"a": 1}) == b'{"a": 1}'
def test_decode(self):
    """decode() must parse a JSON string back into a dict."""
    assert mlengine_prediction_summary.JsonCoder.decode('{"a": 1}') == {"a": 1}
| TestJsonCode |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.