language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/nth-magical-number.py | {
"start": 32,
"end": 703
} | class ____(object):
def nthMagicalNumber(self, N, A, B):
"""
:type N: int
:type A: int
:type B: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a % b
return a
def check(A, B, N, lcm, target):
return target//A + target//B - target//lcm >= N
lcm = A*B // gcd(A, B)
left, right = min(A, B), max(A, B)*N
while left <= right:
mid = left + (right-left)//2
if check(A, B, N, lcm, mid):
right = mid-1
else:
left = mid+1
return left % (10**9 + 7)
| Solution |
python | keras-team__keras | keras/src/ops/nn_test.py | {
"start": 47317,
"end": 91682
} | class ____(testing.TestCase):
def test_relu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(knn.relu(x), [0, 0, 1, 2, 3])
def test_relu6(self):
x = np.array([-1, 0, 1, 2, 3, 4, 5, 6, 7], dtype=np.float32)
self.assertAllClose(knn.relu6(x), [0, 0, 1, 2, 3, 4, 5, 6, 6])
def test_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.sigmoid(x), [0.26894143, 0.5, 0.7310586, 0.880797, 0.95257413]
)
def test_sparse_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(knn.sparse_sigmoid(x), [0.0, 0.5, 1.0, 1.0, 1.0])
def test_softplus(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.softplus(x),
[0.31326166, 0.6931472, 1.3132616, 2.126928, 3.0485873],
)
def test_softsign(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(knn.softsign(x), [-0.5, 0, 0.5, 0.6666667, 0.75])
def test_silu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.silu(x),
[-0.26894143, 0, 0.7310586, 1.7615942, 2.8577223],
)
def test_log_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.log_sigmoid(x),
[-1.3132616, -0.6931472, -0.31326166, -0.126928, -0.04858732],
)
def test_leaky_relu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.leaky_relu(x),
[-0.2, 0, 1, 2, 3],
)
def test_hard_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.hard_sigmoid(x),
[0.33333334, 0.5, 0.6666667, 0.8333334, 1.0],
)
def test_hard_silu(self):
x = np.array([-3, -2, -1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.hard_silu(x),
[-0.0, -0.333333, -0.333333, 0.0, 0.6666667, 1.6666667, 3.0],
)
def test_elu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.elu(x),
[-0.63212055, 0, 1, 2, 3],
)
self.assertAllClose(
knn.elu(x, alpha=0.5),
[-0.31606027, 0, 1, 2, 3],
)
def test_selu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.selu(x),
[-1.1113307, 0.0, 1.050701, 2.101402, 3.152103],
)
def test_gelu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.gelu(x),
[-0.15880796, 0.0, 0.841192, 1.9545977, 2.9963627],
)
def test_celu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.celu(x),
[-0.63212055, 0.0, 1.0, 2.0, 3.0],
)
def test_glu(self):
x = np.array([-1, 0, 1, 2, 3, 4], dtype=np.float32)
self.assertAllClose(
knn.glu(x),
[-0.8807971, 0.0, 0.98201376],
)
def test_tanh_shrink(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.tanh_shrink(x),
[-0.238406, 0.0, 0.238406, 1.035972, 2.004945],
)
def test_hard_tanh(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.hard_tanh(x),
[-1.0, 0.0, 1.0, 1.0, 1.0],
)
def test_hard_shrink(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.hard_shrink(x),
[0.0, 0.0, 1.0, 2.0, 3.0],
)
def test_threshold(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.threshold(x, 0, 0),
[0.0, 0.0, 1.0, 2.0, 3.0],
)
def test_squareplus(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.squareplus(x),
[0.780776, 1.0, 1.618034, 2.414214, 3.302776],
)
def test_soft_shrink(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.soft_shrink(x),
[0.0, 0.0, 0.5, 1.5, 2.5],
)
def test_sparse_plus(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.sparse_plus(x),
[0.0625, 0.25, 1.0, 2.0, 3.0],
)
def test_softmax(self):
x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllClose(
knn.softmax(x, axis=None), # Reduce on all axes.
[[0.045015, 0.122364, 0.33262], [0.045015, 0.122364, 0.33262]],
)
self.assertAllClose(
knn.softmax(x, axis=0),
[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
)
self.assertAllClose(
knn.softmax(x, axis=-1),
[
[0.09003057, 0.24472848, 0.66524094],
[0.09003057, 0.24472848, 0.66524094],
],
)
self.assertAllClose(
knn.softmax(x), # Default axis should be -1.
[
[0.09003057, 0.24472848, 0.66524094],
[0.09003057, 0.24472848, 0.66524094],
],
)
def test_softmax_correctness_with_axis_tuple(self):
input = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
combination = combinations(range(3), 2)
for axis in list(combination):
result = keras.ops.nn.softmax(input, axis=axis)
normalized_sum_by_axis = np.sum(
ops.convert_to_numpy(result), axis=axis
)
self.assertAllClose(normalized_sum_by_axis, 1.0)
def test_log_softmax(self):
x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllClose(
knn.log_softmax(x, axis=None), # Reduce on all axes.
[
[-3.100753, -2.100753, -1.100753],
[-3.100753, -2.100753, -1.100753],
],
)
self.assertAllClose(
knn.log_softmax(x, axis=0),
[
[-0.693147, -0.693147, -0.693147],
[-0.693147, -0.693147, -0.693147],
],
)
self.assertAllClose(
knn.log_softmax(x, axis=-1),
[
[-2.407606, -1.407606, -0.407606],
[-2.407606, -1.407606, -0.407606],
],
)
self.assertAllClose(
knn.log_softmax(x), # Default axis should be -1.
[
[-2.407606, -1.407606, -0.407606],
[-2.407606, -1.407606, -0.407606],
],
)
def test_log_softmax_correctness_with_axis_tuple(self):
input = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
combination = combinations(range(3), 2)
for axis in list(combination):
result = keras.ops.nn.log_softmax(input, axis=axis)
normalized_sum_by_axis = np.sum(
np.exp(ops.convert_to_numpy(result)), axis=axis
)
self.assertAllClose(normalized_sum_by_axis, 1.0)
def test_polar_corectness(self):
abs_ = np.array([1, 2], dtype="float32")
angle = np.array([2, 3], dtype="float32")
out = knn.polar(abs_, angle)
self.assertAllClose(
out, [-0.41614684 + 0.9092974j, -1.979985 + 0.28224j], atol=1e-3
)
def test_sparsemax(self):
x = np.array([-0.5, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.sparsemax(x),
[0.0, 0.0, 0.0, 0.0, 1.0],
)
def test_max_pool(self):
data_format = backend.config.image_data_format()
# Test 1D max pooling.
if data_format == "channels_last":
input_shape = (2, 20, 3)
else:
input_shape = (2, 3, 20)
x = np.arange(120, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.max_pool(x, 2, 1, padding="valid"),
np_maxpool1d(x, 2, 1, padding="valid", data_format=data_format),
)
self.assertAllClose(
knn.max_pool(x, 2, 2, padding="same"),
np_maxpool1d(x, 2, 2, padding="same", data_format=data_format),
)
# Test 2D max pooling.
if data_format == "channels_last":
input_shape = (2, 10, 9, 3)
else:
input_shape = (2, 3, 10, 9)
x = np.arange(540, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.max_pool(x, 2, 1, padding="valid"),
np_maxpool2d(x, 2, 1, padding="valid", data_format=data_format),
)
self.assertAllClose(
knn.max_pool(x, 2, (2, 1), padding="same"),
np_maxpool2d(x, 2, (2, 1), padding="same", data_format=data_format),
)
def test_average_pool_valid_padding(self):
data_format = backend.config.image_data_format()
# Test 1D average pooling.
if data_format == "channels_last":
input_shape = (2, 20, 3)
else:
input_shape = (2, 3, 20)
x = np.arange(120, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.average_pool(x, 2, 1, padding="valid"),
np_avgpool1d(x, 2, 1, padding="valid", data_format=data_format),
)
# Test 2D average pooling.
if data_format == "channels_last":
input_shape = (2, 10, 9, 3)
else:
input_shape = (2, 3, 10, 9)
x = np.arange(540, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.average_pool(x, 2, 1, padding="valid"),
np_avgpool2d(x, 2, 1, padding="valid", data_format=data_format),
)
def test_average_pool_same_padding(self):
data_format = backend.config.image_data_format()
# Test 1D average pooling.
if data_format == "channels_last":
input_shape = (2, 20, 3)
else:
input_shape = (2, 3, 20)
x = np.arange(120, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.average_pool(x, 2, 2, padding="same"),
np_avgpool1d(x, 2, 2, padding="same", data_format=data_format),
)
# Test 2D average pooling.
if data_format == "channels_last":
input_shape = (2, 10, 9, 3)
else:
input_shape = (2, 3, 10, 9)
x = np.arange(540, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.average_pool(x, 2, (2, 1), padding="same"),
np_avgpool2d(x, 2, (2, 1), padding="same", data_format=data_format),
)
# Test 2D average pooling with different pool size.
if data_format == "channels_last":
input_shape = (2, 10, 9, 3)
else:
input_shape = (2, 3, 10, 9)
x = np.arange(540, dtype=float).reshape(input_shape)
self.assertAllClose(
knn.average_pool(x, (2, 3), (3, 3), padding="same"),
np_avgpool2d(
x, (2, 3), (3, 3), padding="same", data_format=data_format
),
)
@parameterized.product(
strides=(1, 2, 3),
padding=("valid", "same"),
dilation_rate=(1, 2),
)
def test_conv_1d(self, strides, padding, dilation_rate):
if strides > 1 and dilation_rate > 1:
pytest.skip("Unsupported configuration")
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 20, 3)
else:
input_shape = (2, 3, 20)
inputs_1d = np.arange(120, dtype=float).reshape(input_shape)
kernel = np.arange(24, dtype=float).reshape([4, 3, 2])
outputs = knn.conv(
inputs_1d,
kernel,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
)
expected = np_conv1d(
inputs_1d,
kernel,
bias_weights=np.zeros((2,)),
strides=strides,
padding=padding.lower(),
data_format=backend.config.image_data_format(),
dilation_rate=dilation_rate,
groups=1,
)
self.assertAllClose(outputs, expected)
@parameterized.product(strides=(1, 2, (1, 2)), padding=("valid", "same"))
def test_conv_2d(self, strides, padding):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 10, 3)
else:
input_shape = (2, 3, 10, 10)
inputs_2d = np.arange(600, dtype=float).reshape(input_shape)
kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
outputs = knn.conv(inputs_2d, kernel, strides, padding=padding)
expected = np_conv2d(
inputs_2d,
kernel,
bias_weights=np.zeros((2,)),
strides=strides,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs, expected)
@parameterized.product(strides=(1, 2), dilation_rate=(1, (2, 1)))
def test_conv_2d_group_2(self, strides, dilation_rate):
if (
backend.backend() == "tensorflow"
and strides == 2
and dilation_rate == (2, 1)
):
# This case is not supported by the TF backend.
return
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 10, 4)
else:
input_shape = (2, 4, 10, 10)
inputs_2d = np.ones(input_shape)
kernel = np.ones([2, 2, 2, 6])
outputs = knn.conv(
inputs_2d,
kernel,
strides,
padding="same",
dilation_rate=dilation_rate,
)
expected = np_conv2d(
inputs_2d,
kernel,
bias_weights=np.zeros((6,)),
strides=strides,
padding="same",
data_format=backend.config.image_data_format(),
dilation_rate=dilation_rate,
groups=1,
)
self.assertAllClose(outputs, expected)
@parameterized.product(
strides=(1, (1, 1, 1), 2),
padding=("valid", "same"),
data_format=("channels_first", "channels_last"),
)
def test_conv_3d(self, strides, padding, data_format):
if data_format == "channels_last":
input_shape = (2, 8, 8, 8, 3)
else:
input_shape = (2, 3, 8, 8, 8)
inputs_3d = np.arange(3072, dtype=float).reshape(input_shape)
kernel = np.arange(162, dtype=float).reshape([3, 3, 3, 3, 2])
outputs = knn.conv(
inputs_3d, kernel, strides, padding=padding, data_format=data_format
)
expected = np_conv3d(
inputs_3d,
kernel,
bias_weights=np.zeros((2,)),
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
# Test for tracing error on tensorflow backend.
if backend.backend() == "tensorflow":
import tensorflow as tf
@tf.function
def conv(x):
return knn.conv(
x, kernel, strides, padding=padding, data_format=data_format
)
outputs = conv(inputs_3d)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
@parameterized.product(
strides=(1, (1, 1), (2, 2)),
padding=("valid", "same"),
dilation_rate=(1, (2, 2)),
)
def test_depthwise_conv_2d(self, strides, padding, dilation_rate):
if (
backend.backend() == "tensorflow"
and strides == (2, 2)
and dilation_rate == (2, 2)
):
# This case is not supported by the TF backend.
return
print(strides, padding, dilation_rate)
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 10, 3)
else:
input_shape = (2, 3, 10, 10)
inputs_2d = np.arange(600, dtype=float).reshape(input_shape)
kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
outputs = knn.depthwise_conv(
inputs_2d,
kernel,
strides,
padding=padding,
dilation_rate=dilation_rate,
)
expected = np_depthwise_conv2d(
inputs_2d,
kernel,
bias_weights=np.zeros((6,)),
strides=strides,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=dilation_rate,
)
self.assertAllClose(outputs, expected)
@parameterized.product(
strides=(1, 2),
padding=("valid", "same"),
dilation_rate=(1, (2, 2)),
)
def test_separable_conv_2d(self, strides, padding, dilation_rate):
if (
backend.backend() == "tensorflow"
and strides == 2
and dilation_rate == (2, 2)
):
# This case is not supported by the TF backend.
return
# Test 2D conv.
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 10, 3)
else:
input_shape = (2, 3, 10, 10)
inputs_2d = np.arange(600, dtype=float).reshape(input_shape)
depthwise_kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
pointwise_kernel = np.arange(72, dtype=float).reshape([1, 1, 6, 12])
outputs = knn.separable_conv(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
strides,
padding=padding,
dilation_rate=dilation_rate,
)
# Depthwise followed by pointwise conv
expected_depthwise = np_depthwise_conv2d(
inputs_2d,
depthwise_kernel,
np.zeros(6),
strides=strides,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=dilation_rate,
)
expected = np_conv2d(
expected_depthwise,
pointwise_kernel,
np.zeros(6 * 12),
strides=1,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=dilation_rate,
groups=1,
)
self.assertAllClose(outputs, expected)
@parameterized.product(padding=("valid", "same"))
def test_conv_transpose_1d(self, padding):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 4, 3)
else:
input_shape = (2, 3, 4)
inputs_1d = np.arange(24, dtype=float).reshape(input_shape)
kernel = np.arange(30, dtype=float).reshape([2, 5, 3])
outputs = knn.conv_transpose(inputs_1d, kernel, 2, padding=padding)
expected = np_conv1d_transpose(
inputs_1d,
kernel,
bias_weights=np.zeros(5),
strides=2,
output_padding=None,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=1,
)
self.assertAllClose(outputs, expected)
@parameterized.product(strides=(2, (2, 2)), padding=("valid", "same"))
def test_conv_transpose_2d(self, strides, padding):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 4, 4, 3)
else:
input_shape = (2, 3, 4, 4)
inputs_2d = np.arange(96, dtype=float).reshape(input_shape)
kernel = np.arange(60, dtype=float).reshape([2, 2, 5, 3])
outputs = knn.conv_transpose(
inputs_2d, kernel, strides, padding=padding
)
expected = np_conv2d_transpose(
inputs_2d,
kernel,
bias_weights=np.zeros(5),
strides=strides,
output_padding=None,
padding=padding,
data_format=backend.config.image_data_format(),
dilation_rate=1,
)
self.assertAllClose(outputs, expected)
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_one_hot(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors")
# Test 1D one-hot.
indices_1d = np.array([0, 1, 2, 3])
output_1d = knn.one_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d])
self.assertSparse(output_1d, sparse)
output_1d = knn.one_hot(indices_1d, 4, axis=0, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d])
self.assertSparse(output_1d, sparse)
# Test 1D list one-hot.
indices_1d = [0, 1, 2, 3]
output_1d = knn.one_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d])
self.assertSparse(output_1d, sparse)
output_1d = knn.one_hot(indices_1d, 4, axis=0, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d])
self.assertSparse(output_1d, sparse)
# Test 2D one-hot.
indices_2d = np.array([[0, 1], [2, 3]])
output_2d = knn.one_hot(indices_2d, 4, sparse=sparse)
self.assertAllClose(output_2d, np.eye(4)[indices_2d])
self.assertSparse(output_2d, sparse)
output_2d = knn.one_hot(indices_2d, 4, axis=2, sparse=sparse)
self.assertAllClose(output_2d, np.eye(4)[indices_2d])
self.assertSparse(output_2d, sparse)
output_2d = knn.one_hot(indices_2d, 4, axis=1, sparse=sparse)
self.assertAllClose(
output_2d, np.transpose(np.eye(4)[indices_2d], (0, 2, 1))
)
self.assertSparse(output_2d, sparse)
# Test 1D one-hot with 1 extra dimension.
indices_1d = np.array([[0], [1], [2], [3]])
output_1d = knn.one_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d])
self.assertSparse(output_1d, sparse)
output_1d = knn.one_hot(indices_1d, 4, axis=0, sparse=sparse)
self.assertAllClose(output_1d, np.eye(4)[indices_1d].swapaxes(1, 2))
self.assertSparse(output_1d, sparse)
# Test 1D one-hot with negative inputs
indices_1d = np.array([0, -1, -1, 3])
output_1d = knn.one_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(
output_1d,
np.array(
[
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
],
dtype=np.float32,
),
)
self.assertSparse(output_1d, sparse)
def test_binary_crossentropy(self):
# Test with from_logits=False
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.1], [0.2], [0.3], [0.4]])
result = knn.binary_crossentropy(target, output, from_logits=False)
self.assertAllClose(
result,
np.array([[0.32508277], [1.47080801], [0.52613434], [0.91629048]]),
)
# Test with from_logits=True
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.1], [0.2], [0.3], [0.4]])
result = knn.binary_crossentropy(target, output, from_logits=True)
self.assertAllClose(
result,
np.array([[0.73439666], [0.61813887], [0.79435524], [0.51301525]]),
)
# Test with output clipping
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.99], [-0.2], [0.9], [-0.4]])
result = knn.binary_crossentropy(target, output, from_logits=True)
self.assertAllClose(
result,
np.array([[1.206961], [0.778139], [1.061154], [0.913015]]),
)
def test_categorical_crossentropy(self):
target = np.array(
[
[0.33008796, 0.0391289, 0.9503603],
[0.80376694, 0.92363342, 0.19147756],
]
)
output = np.array(
[
[0.23446431, 0.35822914, 0.06683268],
[0.3413979, 0.05420256, 0.81619654],
]
)
# Test from_logits=False
result = knn.categorical_crossentropy(
target, output, from_logits=False, axis=-1
)
self.assertAllClose(result, np.array([2.54095299, 3.96374412]))
# Test axis
result = knn.categorical_crossentropy(
target, output, from_logits=False, axis=0
)
self.assertAllClose(
result, np.array([0.71683073, 1.87988172, 2.46810762])
)
# Test from_logits=True
result = knn.categorical_crossentropy(
target, output, from_logits=True, axis=-1
)
self.assertAllClose(result, np.array([1.59419954, 2.49880593]))
# Test with output clipping
output = np.array(
[
[1.23446431, -0.35822914, 1.06683268],
[0.3413979, -0.05420256, 0.81619654],
]
)
result = knn.categorical_crossentropy(
target, output, from_logits=True, axis=-1
)
self.assertAllClose(result, np.array([1.16825923, 2.55436813]))
def test_sparse_categorical_crossentropy(self):
target = np.array([0, 1, 2])
output = np.array(
[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94]]
)
result = knn.sparse_categorical_crossentropy(target, output)
self.assertAllClose(result, [0.105361, 0.116534, 0.061875])
output = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
result = knn.sparse_categorical_crossentropy(
target, output, from_logits=True
)
self.assertAllClose(result, [0.001822, 0.000459, 0.169846])
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_multi_hot(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors")
# Test 1D multi-hot.
indices_1d = np.array([0, 1, 2, 3])
expected_output_1d = np.array([1, 1, 1, 1])
output_1d = knn.multi_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(output_1d, expected_output_1d)
self.assertSparse(output_1d, sparse)
# Test 2D multi-hot.
indices_2d = np.array([[0, 1], [2, 3]])
expected_output_2d = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
output_2d = knn.multi_hot(indices_2d, 4, sparse=sparse)
self.assertAllClose(output_2d, expected_output_2d)
self.assertSparse(output_2d, sparse)
# Test 1D multi-hot with negative inputs
indices_1d = np.array([0, -1, -1, 3])
expected_output_1d = np.array([1, 0, 0, 1])
output_1d = knn.multi_hot(indices_1d, 4, sparse=sparse)
self.assertAllClose(output_1d, expected_output_1d)
self.assertSparse(output_1d, sparse)
def test_moments(self):
# Test 1D moments
x = np.array([0, 1, 2, 3, 4, 100, -200]).astype(np.float32)
mean, variance = knn.moments(x, axes=[0])
self.assertAllClose(mean, np.mean(x), atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, np.var(x), atol=1e-5, rtol=1e-5)
# Test batch statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0])
self.assertAllClose(mean, np.mean(x, axis=0), atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, np.var(x, axis=0), atol=1e-5, rtol=1e-5)
# Test global statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0, 1, 2])
expected_mean = np.mean(x, axis=(0, 1, 2))
expected_variance = np.var(x, axis=(0, 1, 2))
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
# Test keepdims
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0, 1, 2], keepdims=True)
expected_mean = np.mean(x, axis=(0, 1, 2), keepdims=True)
expected_variance = np.var(x, axis=(0, 1, 2), keepdims=True)
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
# Test float16 which causes overflow
x = np.array(
[-741.0, 353.2, 1099.0, -1807.0, 502.8, -83.4, 333.5, -130.9],
dtype=np.float16,
)
mean, variance = knn.moments(x, axes=[0])
expected_mean = np.mean(x.astype(np.float32)).astype(np.float16)
# the output variance is clipped to the max value of np.float16 because
# it is overflowed
expected_variance = np.finfo(np.float16).max
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="synchronized=True only implemented for TF backend",
)
def test_moments_sync(self):
# Test batch statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0], synchronized=True)
self.assertAllClose(mean, np.mean(x, axis=0), atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, np.var(x, axis=0), atol=1e-5, rtol=1e-5)
# Test global statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0, 1, 2], synchronized=True)
expected_mean = np.mean(x, axis=(0, 1, 2))
expected_variance = np.var(x, axis=(0, 1, 2))
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
# Test keepdims
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(
x, axes=[0, 1, 2], keepdims=True, synchronized=True
)
expected_mean = np.mean(x, axis=(0, 1, 2), keepdims=True)
expected_variance = np.var(x, axis=(0, 1, 2), keepdims=True)
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
@parameterized.product(dtype=["float16", "float32"])
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="synchronized=True only implemented for TF backend",
)
def test_moments_sync_with_distribution_strategy(self, dtype):
from tensorflow.python.eager import context
from keras.src.utils.module_utils import tensorflow as tf
context._reset_context()
# Config 2 CPUs for testing.
logical_cpus = tf.config.list_logical_devices("CPU")
if len(logical_cpus) == 1:
from tensorflow.python.eager import context
context._reset_context()
tf.config.set_logical_device_configuration(
tf.config.list_physical_devices("CPU")[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
@tf.function()
def test_on_moments(inputs):
return knn.moments(
inputs, axes=-1, keepdims=True, synchronized=True
)
# Test output of moments.
inputs = tf.constant([5.0, 9.0, 1.0, 3.0], dtype=dtype)
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
mean, variance = strategy.run(test_on_moments, args=(inputs,))
self.assertEqual(mean.values[0], 4.5)
self.assertEqual(variance.values[0], 8.75)
self.assertEqual(variance.values[0], 8.75)
context._reset_context()
def test_batch_normalization(self):
x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
mean = np.array([0.2, 0.3, 0.4])
variance = np.array([4.0, 16.0, 64.0])
output = knn.batch_normalization(
x,
mean,
variance,
axis=-1,
offset=np.array([5.0, 10.0, 15.0]),
scale=np.array([10.0, 20.0, 30.0]),
epsilon=1e-7,
)
expected_output = np.array([[4.5, 9.5, 14.625], [6.0, 11.0, 15.75]])
self.assertAllClose(output, expected_output)
output = knn.batch_normalization(
x,
mean,
variance,
axis=1,
epsilon=1e-7,
)
expected_output = np.array(
[[-0.05, -0.025, -0.0125], [0.1, 0.05, 0.025]]
)
self.assertAllClose(output, expected_output)
output = knn.batch_normalization(
np.random.uniform(size=[2, 3, 3, 5]),
np.random.uniform(size=[5]),
np.random.uniform(size=[5]),
axis=3,
offset=np.random.uniform(size=[5]),
scale=np.random.uniform(size=[5]),
)
self.assertEqual(tuple(output.shape), (2, 3, 3, 5))
def test_ctc_loss(self):
labels = np.array([[1, 2, 1], [1, 2, 2]])
outputs = np.array(
[
[[0.4, 0.8, 0.4], [0.2, 0.8, 0.3], [0.9, 0.4, 0.5]],
[[0.4, 0.8, 0.4], [0.2, 0.3, 0.3], [0.4, 0.3, 0.2]],
]
)
label_length = np.array([3, 2])
output_length = np.array([3, 2])
result = knn.ctc_loss(labels, outputs, label_length, output_length)
self.assertAllClose(result, np.array([3.4411672, 1.91680186]))
def test_ctc_decode(self):
inputs = np.array(
[
[
[0.1, 0.4, 0.2, 0.4],
[0.3, -0.3, 0.4, 0.2],
[0.3, 0.2, 0.4, 0.3],
],
[
[0.7, 0.4, 0.3, 0.2],
[0.3, 0.3, 0.4, 0.1],
[0.6, -0.1, 0.1, 0.5],
],
[
[0.1, 0.4, 0.2, 0.7],
[0.3, 0.3, -0.2, 0.7],
[0.3, 0.2, 0.4, 0.1],
],
]
)
labels = np.array([[1, 2, -1], [2, -1, -1], [3, -1, -1]])
score_labels = np.array([[-1.2], [-1.7], [-0.7]])
repeated_labels = np.array([[1, 2, 2], [2, -1, -1], [3, -1, -1]])
# Test strategy="greedy" and merge_repeated=True
(decoded,), scores = knn.ctc_decode(
inputs,
sequence_lengths=[3, 3, 1],
strategy="greedy",
mask_index=0,
)
self.assertAllClose(decoded, labels)
self.assertAllClose(scores, score_labels)
# Test strategy="greedy" and merge_repeated=False
(decoded,), scores = knn.ctc_decode(
inputs,
sequence_lengths=[3, 3, 1],
strategy="greedy",
merge_repeated=False,
mask_index=0,
)
self.assertAllClose(decoded, repeated_labels)
self.assertAllClose(scores, score_labels)
if backend.backend() == "torch":
self.skipTest("torch doesn't support 'beam_search' strategy")
labels = np.array(
[
[[1, 2, -1], [2, -1, -1], [3, -1, -1]],
[[2, -1, -1], [3, -1, -1], [1, -1, -1]],
]
)
score_labels = np.array(
[
[-2.426537, -2.435596],
[-2.127681, -2.182338],
[-1.063386, -1.363386],
]
)
beam_width = 4
top_paths = 2
# Test strategy="beam_search"
decoded, scores = knn.ctc_decode(
inputs,
sequence_lengths=[3, 3, 1],
strategy="beam_search",
beam_width=beam_width,
top_paths=top_paths,
mask_index=0,
)
self.assertAllClose(decoded, labels)
self.assertAllClose(scores, score_labels)
def test_normalize(self):
x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllClose(
knn.normalize(x, axis=None),
[
[0.18898225, 0.3779645, 0.56694674],
[0.18898225, 0.3779645, 0.56694674],
],
)
self.assertAllClose(
knn.normalize(x, axis=0),
[
[0.70710677, 0.70710677, 0.70710677],
[0.70710677, 0.70710677, 0.70710677],
],
)
self.assertAllClose(
knn.normalize(x, axis=-1),
[
[0.26726124, 0.53452247, 0.8017837],
[0.26726124, 0.53452247, 0.8017837],
],
)
self.assertAllClose(
knn.normalize(x, order=3),
[
[0.30285344, 0.6057069, 0.9085603],
[0.30285344, 0.6057069, 0.9085603],
],
)
# linalg.norm(x, ...) < epsilon
x = np.array([[1e-6, 1e-8]], dtype=np.float32)
self.assertAllClose(
knn.normalize(x, axis=-1, order=2, epsilon=1e-5),
[[1e-1, 1e-3]],
)
def test_psnr(self):
x1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
x2 = np.array([[0.2, 0.2, 0.3], [0.4, 0.6, 0.6]])
max_val = 1.0
expected_psnr_1 = 20 * np.log10(max_val) - 10 * np.log10(
np.mean(np.square(x1 - x2))
)
psnr_1 = knn.psnr(x1, x2, max_val)
self.assertAlmostEqual(psnr_1, expected_psnr_1)
x3 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
x4 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
max_val = 1.0
expected_psnr_2 = 20 * np.log10(max_val) - 10 * np.log10(
np.mean(np.square(x3 - x4))
)
psnr_2 = knn.psnr(x3, x4, max_val)
self.assertAlmostEqual(psnr_2, expected_psnr_2)
    @parameterized.named_parameters(
        named_product(
            bias=(None, True),
            scale=(None, 1.0),
            mask_and_is_causal=((None, False), (True, False), (None, True)),
            flash_attention=(None, True, False),
        )
    )
    def test_dot_product_attention(
        self, bias, scale, mask_and_is_causal, flash_attention
    ):
        """Compare knn.dot_product_attention against the local reference
        implementation across bias/scale/mask/causal/flash combinations."""
        mask, is_causal = mask_and_is_causal
        query_shape = (2, 3, 4, 8)
        key_shape = (2, 3, 4, 8)
        bias_shape = (2, 4, 3, 3)
        query = np.arange(math.prod(query_shape), dtype=float).reshape(
            query_shape
        )
        key = np.arange(math.prod(key_shape), dtype=float).reshape(key_shape)
        value = np.arange(math.prod(key_shape), dtype=float).reshape(key_shape)
        if mask is not None:
            # Lower-triangular (causal-style) boolean mask tiled to full shape.
            mask = np.tril(np.ones((3, 3))).astype("bool")
            mask = mask[None, None, ...]
            mask = np.tile(mask, (2, 4, 1, 1))
        if bias is not None:
            if backend.backend() == "torch":
                self.skipTest(
                    "torch does not support `bias` with `dot_product_attention`"
                )
            bias = np.arange(math.prod(bias_shape), dtype=float).reshape(
                bias_shape
            )
        if flash_attention:
            # Flash attention has strict backend/hardware requirements; skip
            # every configuration in which they aren't met.
            if backend.backend() in ("tensorflow", "numpy"):
                self.skipTest(
                    "Flash attention is not supported in tensorflow and numpy "
                    "backends."
                )
            elif backend.backend() == "torch":
                import torch

                # NOTE(review): the skip message mentions `mask=None` but the
                # skip fires when a mask IS provided — wording looks inverted;
                # confirm intent.
                if mask is not None:
                    self.skipTest(
                        "Flash attention doesn't support `mask=None` in torch "
                        "backend."
                    )
                if not torch.cuda.is_available():
                    self.skipTest(
                        "Flash attention must be run on CUDA in torch backend."
                    )
                cuda_compute_capability = tuple(
                    int(x) for x in torch.cuda.get_device_capability()
                )
                if cuda_compute_capability < (8, 0):
                    self.skipTest(
                        "Flash attention must be run on CUDA compute "
                        "capability >= 8.0 in torch backend."
                    )
            elif backend.backend() == "jax":
                import jax
                from jax._src import xla_bridge

                if "cuda" not in xla_bridge.get_backend().platform_version:
                    self.skipTest(
                        "Flash attention must be run on CUDA in jax backend."
                    )
                d, *_ = jax.local_devices(backend="gpu")
                cuda_compute_capability = tuple(
                    int(x) for x in d.compute_capability.split(".")
                )
                if cuda_compute_capability < (8, 0):
                    self.skipTest(
                        "Flash attention must be run on CUDA compute "
                        "capability >= 8.0 in jax backend."
                    )

            # Flash attention only supports float16 and bfloat16. We multiply
            # 0.1 to avoid overflow.
            query = (query * 0.1).astype("float16")
            key = (key * 0.1).astype("float16")
            value = (value * 0.1).astype("float16")
            if bias is not None:
                bias = (bias * 0.1).astype("float16")

        outputs = knn.dot_product_attention(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            is_causal=is_causal,
            flash_attention=flash_attention,
        )
        expected = _dot_product_attention(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            is_causal=is_causal,
        )
        # Flash attention runs in half precision, so tolerate more error there.
        self.assertAllClose(
            outputs, expected, atol=1e-3 if flash_attention else 1e-6
        )
@parameterized.named_parameters(named_product(scale=(1.0, 10.0)))
def test_rms_normalization(self, scale):
x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype="float32")
scale = np.array([scale] * x.shape[-1], dtype="float32")
expected_output = (
np.array([[0.46291, 0.92582, 1.38873], [0.78954, 0.98693, 1.18431]])
* scale
)
self.assertAllClose(
knn.rms_normalization(x, scale), expected_output, atol=1e-3
)
self.assertAllClose(knn.RMSNorm()(x, scale), expected_output, atol=1e-3)
def test_layer_normalization(self):
x = np.arange(5, dtype="float32")
expected_output = np.array(
[-1.4142135, -0.70710677, 0.0, 0.7071067, 1.4142135]
)
self.assertAllClose(
knn.layer_normalization(x), expected_output, atol=1e-3
)
self.assertAllClose(knn.LayerNorm()(x), expected_output, atol=1e-3)
| NNOpsCorrectnessTest |
python | python__mypy | mypy/stubtest.py | {
"start": 2097,
"end": 33565
} | class ____:
    def __init__(
        self,
        object_path: list[str],
        message: str,
        stub_object: MaybeMissing[nodes.Node],
        runtime_object: MaybeMissing[Any],
        *,
        stub_desc: str | None = None,
        runtime_desc: str | None = None,
    ) -> None:
        """Represents an error found by stubtest.

        :param object_path: Location of the object with the error,
            e.g. ``["module", "Class", "method"]``
        :param message: Error message
        :param stub_object: The mypy node representing the stub
        :param runtime_object: Actual object obtained from the runtime
        :param stub_desc: Specialised description for the stub object, should you wish
        :param runtime_desc: Specialised description for the runtime object, should you wish
        """
        self.object_path = object_path
        self.object_desc = ".".join(object_path)
        self.message = message
        self.stub_object = stub_object
        self.runtime_object = runtime_object
        self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))

        if runtime_desc is None:
            runtime_sig = safe_inspect_signature(runtime_object)
            if runtime_sig is None:
                # No signature obtainable: fall back to a truncated repr.
                self.runtime_desc = _truncate(repr(runtime_object), 100)
            else:
                runtime_is_async = inspect.iscoroutinefunction(runtime_object)
                description = describe_runtime_callable(runtime_sig, is_async=runtime_is_async)
                self.runtime_desc = _truncate(description, 100)
        else:
            self.runtime_desc = runtime_desc
    def is_missing_stub(self) -> bool:
        """Whether or not the error is for something missing from the stub."""
        # MISSING sentinel instances are used when no stub node was found.
        return isinstance(self.stub_object, Missing)
    def is_positional_only_related(self) -> bool:
        """Whether or not the error is for something being (or not being) positional-only."""
        # TODO: This is hacky, use error codes or something more resilient
        # (matches the phrasing produced when reporting positional-only issues)
        return "should be positional" in self.message
    def is_disjoint_base_related(self) -> bool:
        """Whether or not the error is related to @disjoint_base."""
        # TODO: This is hacky, use error codes or something more resilient
        # (matches the phrasing of every @disjoint_base error message)
        return "@disjoint_base" in self.message
    def get_description(self, concise: bool = False) -> str:
        """Returns a description of the error.

        :param concise: Whether to return a concise, one-line description
        """
        if concise:
            return _style(self.object_desc, bold=True) + " " + self.message

        # Best effort: locate the stub definition (file path and line number).
        stub_line = None
        stub_file = None
        if not isinstance(self.stub_object, Missing):
            stub_line = self.stub_object.line
            stub_node = get_stub(self.object_path[0])
            if stub_node is not None:
                stub_file = stub_node.path or None

        stub_loc_str = ""
        if stub_file:
            stub_loc_str += f" in file {Path(stub_file)}"
        if stub_line:
            stub_loc_str += f"{':' if stub_file else ' at line '}{stub_line}"

        # Best effort: locate the runtime definition via inspect.
        runtime_line = None
        runtime_file = None
        if not isinstance(self.runtime_object, Missing):
            try:
                runtime_line = inspect.getsourcelines(self.runtime_object)[1]
            except (OSError, TypeError, SyntaxError):
                pass
            try:
                runtime_file = inspect.getsourcefile(self.runtime_object)
            except TypeError:
                pass

        runtime_loc_str = ""
        if runtime_file:
            runtime_loc_str += f" in file {Path(runtime_file)}"
        if runtime_line:
            runtime_loc_str += f"{':' if runtime_file else ' at line '}{runtime_line}"

        output = [
            _style("error: ", color="red", bold=True),
            _style(self.object_desc, bold=True),
            " ",
            self.message,
            "\n",
            "Stub:",
            _style(stub_loc_str, dim=True),
            "\n",
            _style(self.stub_desc + "\n", color="blue", dim=True),
            "Runtime:",
            _style(runtime_loc_str, dim=True),
            "\n",
            _style(self.runtime_desc + "\n", color="blue", dim=True),
        ]
        return "".join(output)
# ====================
# Core logic
# ====================
def silent_import_module(module_name: str) -> types.ModuleType:
    """Import *module_name* while suppressing warnings and console output.

    Also performs the equivalent of ``from module_name import *`` so that
    not-yet-loaded submodules mentioned in ``__all__`` get imported too.
    """
    with open(os.devnull, "w") as devnull:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with redirect_stdout(devnull), redirect_stderr(devnull):
                runtime = importlib.import_module(module_name)
                # Equivalent of `from module import *`; side effect: loads
                # submodules named in __all__ that aren't imported yet.
                __import__(module_name, fromlist=["*"])
    return runtime
def test_module(module_name: str) -> Iterator[Error]:
    """Tests a given module's stub against introspecting it at runtime.

    Requires the stub to have been built already, accomplished by a call to ``build_stubs``.

    :param module_name: The module to test
    """
    stub = get_stub(module_name)
    if stub is None:
        # Missing stubs are only reported for public module names.
        if not is_probably_private(module_name.split(".")[-1]):
            runtime_desc = repr(sys.modules[module_name]) if module_name in sys.modules else "N/A"
            yield Error(
                [module_name], "failed to find stubs", MISSING, None, runtime_desc=runtime_desc
            )
        return

    try:
        runtime = silent_import_module(module_name)
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        # Any import failure (including SystemExit etc.) is reported as an
        # error rather than propagated.
        note = ""
        if isinstance(e, ModuleNotFoundError):
            note = " Maybe install the runtime package or alter PYTHONPATH?"
        yield Error(
            [module_name], f"failed to import.{note} {type(e).__name__}: {e}", stub, MISSING
        )
        return

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            yield from verify(stub, runtime, [module_name])
        except Exception as e:
            # Identify which module the deepest traceback frame came from.
            bottom_frame = list(traceback.walk_tb(e.__traceback__))[-1][0]
            bottom_module = bottom_frame.f_globals.get("__name__", "")
            # Pass on any errors originating from stubtest or mypy
            # These can occur expectedly, e.g. StubtestFailure
            if bottom_module == "__main__" or bottom_module.split(".")[0] == "mypy":
                raise
            yield Error(
                [module_name],
                f"encountered unexpected error, {type(e).__name__}: {e}",
                stub,
                runtime,
                stub_desc="N/A",
                runtime_desc=(
                    "This is most likely the fault of something very dynamic in your library. "
                    "It's also possible this is a bug in stubtest.\nIf in doubt, please "
                    "open an issue at https://github.com/python/mypy\n\n"
                    + traceback.format_exc().strip()
                ),
            )
@singledispatch
def verify(
    stub: MaybeMissing[nodes.Node], runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
    """Entry point for comparing a stub to a runtime object.

    We use single dispatch based on the type of ``stub``.

    :param stub: The mypy node representing a part of the stub
    :param runtime: The runtime object corresponding to ``stub``
    """
    # Fallback for stub node types with no registered specialization.
    yield Error(object_path, "is an unknown mypy node", stub, runtime)
def _verify_exported_names(
    object_path: list[str], stub: nodes.MypyFile, runtime_all_as_set: set[str]
) -> Iterator[Error]:
    """Compare the stub's public names against the runtime module's __all__."""
    # note that this includes the case the stub simply defines `__all__: list[str]`
    assert "__all__" in stub.names
    public_names_in_stub = {m for m, o in stub.names.items() if o.module_public}
    names_in_stub_not_runtime = sorted(public_names_in_stub - runtime_all_as_set)
    names_in_runtime_not_stub = sorted(runtime_all_as_set - public_names_in_stub)
    if not (names_in_runtime_not_stub or names_in_stub_not_runtime):
        return
    yield Error(
        object_path + ["__all__"],
        (
            "names exported from the stub do not correspond to the names exported at runtime. "
            "This is probably due to things being missing from the stub or an inaccurate `__all__` in the stub"
        ),
        # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very
        # relevant here, and it makes for a prettier error message
        # This means this error will be ignored when using `--ignore-missing-stub`, which is
        # desirable in at least the `names_in_runtime_not_stub` case
        stub_object=MISSING,
        runtime_object=MISSING,
        stub_desc=(f"Names exported in the stub but not at runtime: {names_in_stub_not_runtime}"),
        runtime_desc=(
            f"Names exported at runtime but not in the stub: {names_in_runtime_not_stub}"
        ),
    )
@functools.lru_cache
def _module_symbol_table(runtime: types.ModuleType) -> symtable.SymbolTable | None:
"""Retrieve the symbol table for the module (or None on failure).
1) Use inspect to retrieve the source code of the module
2) Use symtable to parse the source (and use what symtable knows for its purposes)
"""
try:
source = inspect.getsource(runtime)
except (OSError, TypeError, SyntaxError):
return None
try:
return symtable.symtable(source, runtime.__name__, "exec")
except SyntaxError:
return None
@verify.register(nodes.MypyFile)
def verify_mypyfile(
    stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: list[str]
) -> Iterator[Error]:
    """Verify a module: check __all__ consistency, then recurse into members."""
    if isinstance(runtime, Missing):
        yield Error(object_path, "is not present at runtime", stub, runtime)
        return
    if not isinstance(runtime, types.ModuleType):
        # Can possibly happen:
        yield Error(object_path, "is not a module", stub, runtime)  # type: ignore[unreachable]
        return

    runtime_all_as_set: set[str] | None

    if hasattr(runtime, "__all__"):
        runtime_all_as_set = set(runtime.__all__)
        if "__all__" in stub.names:
            # Only verify the contents of the stub's __all__
            # if the stub actually defines __all__
            yield from _verify_exported_names(object_path, stub, runtime_all_as_set)
        else:
            yield Error(object_path + ["__all__"], "is not present in stub", MISSING, runtime)
    else:
        runtime_all_as_set = None

    # Check things in the stub
    to_check = {
        m
        for m, o in stub.names.items()
        if not o.module_hidden and (not is_probably_private(m) or hasattr(runtime, m))
    }

    def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool:
        """Heuristics to determine whether a name originates from another module."""
        obj = getattr(r, attr)
        if isinstance(obj, types.ModuleType):
            return False

        symbol_table = _module_symbol_table(r)
        if symbol_table is not None:
            try:
                symbol = symbol_table.lookup(attr)
            except KeyError:
                pass
            else:
                if symbol.is_imported():
                    # symtable says we got this from another module
                    return False
                # But we can't just return True here, because symtable doesn't know about symbols
                # that come from `from module import *`
                if symbol.is_assigned():
                    # symtable knows we assigned this symbol in the module
                    return True

        # The __module__ attribute is unreliable for anything except functions and classes,
        # but it's our best guess at this point
        try:
            obj_mod = obj.__module__
        except Exception:
            pass
        else:
            if isinstance(obj_mod, str):
                return bool(obj_mod == r.__name__)
        return True

    runtime_public_contents = (
        runtime_all_as_set
        if runtime_all_as_set is not None
        else {
            m
            for m in dir(runtime)
            if not is_probably_private(m)
            # Filter out objects that originate from other modules (best effort). Note that in the
            # absence of __all__, we don't have a way to detect explicit / intentional re-exports
            # at runtime
            and _belongs_to_runtime(runtime, m)
        }
    )
    # Check all things declared in module's __all__, falling back to our best guess
    to_check.update(runtime_public_contents)
    to_check.difference_update(IGNORED_MODULE_DUNDERS)

    for entry in sorted(to_check):
        stub_entry = stub.names[entry].node if entry in stub.names else MISSING
        if isinstance(stub_entry, nodes.MypyFile):
            # Don't recursively check exported modules, since that leads to infinite recursion
            continue
        assert stub_entry is not None
        try:
            runtime_entry = getattr(runtime, entry, MISSING)
        except Exception:
            # Catch all exceptions in case the runtime raises an unexpected exception
            # from __getattr__ or similar.
            continue
        yield from verify(stub_entry, runtime_entry, object_path + [entry])
def _verify_final(
    stub: nodes.TypeInfo, runtime: type[Any], object_path: list[str]
) -> Iterator[Error]:
    """Check that @final in the stub matches runtime subclassability/__final__."""
    try:
        # Empirically probe whether the runtime class can be subclassed.
        class SubClass(runtime):  # type: ignore[misc]
            pass

    except TypeError:
        # Enum classes are implicitly @final
        if not stub.is_final and not issubclass(runtime, enum.Enum):
            yield Error(
                object_path,
                "cannot be subclassed at runtime, but isn't marked with @final in the stub",
                stub,
                runtime,
                stub_desc=repr(stub),
            )
    except Exception:
        # The class probably wants its subclasses to do something special.
        # Examples: ctypes.Array, ctypes._SimpleCData
        pass

    # Runtime class might be annotated with `@final`:
    try:
        runtime_final = getattr(runtime, "__final__", False)
    except Exception:
        runtime_final = False

    if runtime_final and not stub.is_final:
        yield Error(
            object_path,
            "has `__final__` attribute, but isn't marked with @final in the stub",
            stub,
            runtime,
            stub_desc=repr(stub),
        )
# Size in bytes of a pointer / PyObject* slot on this platform.
SIZEOF_PYOBJECT = struct.calcsize("P")


def _shape_differs(t1: type[object], t2: type[object]) -> bool:
    """Check whether two types differ in shape.

    Mirrors the shape_differs() function in typeobject.c in CPython."""
    if sys.version_info >= (3, 12):
        return t1.__basicsize__ != t2.__basicsize__ or t1.__itemsize__ != t2.__itemsize__
    else:
        # CPython had more complicated logic before 3.12:
        # https://github.com/python/cpython/blob/f3c6f882cddc8dc30320d2e73edf019e201394fc/Objects/typeobject.c#L2224
        # We attempt to mirror it here well enough to support the most common cases.
        if t1.__itemsize__ or t2.__itemsize__:
            return t1.__basicsize__ != t2.__basicsize__ or t1.__itemsize__ != t2.__itemsize__
        # Discount trailing weakref/dict slots that t1 adds but t2 lacks.
        t_size = t1.__basicsize__
        if not t2.__weakrefoffset__ and t1.__weakrefoffset__ + SIZEOF_PYOBJECT == t_size:
            t_size -= SIZEOF_PYOBJECT
        if not t2.__dictoffset__ and t1.__dictoffset__ + SIZEOF_PYOBJECT == t_size:
            t_size -= SIZEOF_PYOBJECT
        # NOTE(review): this condition can only hold when t_size == 0 (the same
        # offset was just checked to be falsy) — verify against the CPython
        # logic being mirrored; it may be a transcription slip.
        if not t2.__weakrefoffset__ and t2.__weakrefoffset__ == t_size:
            t_size -= SIZEOF_PYOBJECT
        return t_size != t2.__basicsize__
def _is_disjoint_base(typ: type[object]) -> bool:
    """Return whether a type is a disjoint base at runtime, mirroring CPython's logic in typeobject.c.

    See PEP 800."""
    if typ is object:
        # object is the root of the hierarchy and always a disjoint base.
        return True
    base = typ.__base__
    assert base is not None, f"Type {typ} has no base"
    # A type whose instance layout differs from its base's is a disjoint base.
    return _shape_differs(typ, base)
def _verify_disjoint_base(
    stub: nodes.TypeInfo, runtime: type[object], object_path: list[str]
) -> Iterator[Error]:
    """Check stub @disjoint_base (PEP 800) against the runtime type's layout."""
    is_disjoint_runtime = _is_disjoint_base(runtime)
    # Don't complain about missing @disjoint_base if there are __slots__, because
    # in that case we can infer that it's a disjoint base.
    if (
        is_disjoint_runtime
        and not stub.is_disjoint_base
        and not runtime.__dict__.get("__slots__")
        and not stub.is_final
        and not (stub.is_enum and stub.enum_members)
    ):
        yield Error(
            object_path,
            "is a disjoint base at runtime, but isn't marked with @disjoint_base in the stub",
            stub,
            runtime,
            stub_desc=repr(stub),
        )
    elif stub.is_disjoint_base:
        if not is_disjoint_runtime:
            yield Error(
                object_path,
                "is marked with @disjoint_base in the stub, but isn't a disjoint base at runtime",
                stub,
                runtime,
                stub_desc=repr(stub),
            )
        # @disjoint_base is redundant or contradictory with these properties:
        if runtime.__dict__.get("__slots__"):
            yield Error(
                object_path,
                "is marked as @disjoint_base, but also has slots; add __slots__ instead",
                stub,
                runtime,
                stub_desc=repr(stub),
            )
        elif stub.is_final:
            yield Error(
                object_path,
                "is marked as @disjoint_base, but also marked as @final; remove @disjoint_base",
                stub,
                runtime,
                stub_desc=repr(stub),
            )
        elif stub.is_enum and stub.enum_members:
            yield Error(
                object_path,
                "is marked as @disjoint_base, but is an enum with members, which is implicitly final; "
                "remove @disjoint_base",
                stub,
                runtime,
                stub_desc=repr(stub),
            )
def _verify_metaclass(
    stub: nodes.TypeInfo, runtime: type[Any], object_path: list[str], *, is_runtime_typeddict: bool
) -> Iterator[Error]:
    """Flag classes where exactly one of stub/runtime has a custom metaclass."""
    # We exclude protocols, because of how complex their implementation is in different versions of
    # python. Enums are also hard, as are runtime TypedDicts; ignoring.
    # TODO: check that metaclasses are identical?
    if not stub.is_protocol and not stub.is_enum and not is_runtime_typeddict:
        runtime_metaclass = type(runtime)
        if runtime_metaclass is not type and stub.metaclass_type is None:
            # This means that runtime has a custom metaclass, but a stub does not.
            yield Error(
                object_path,
                "is inconsistent, metaclass differs",
                stub,
                runtime,
                stub_desc="N/A",
                runtime_desc=f"{runtime_metaclass}",
            )
        elif (
            runtime_metaclass is type
            and stub.metaclass_type is not None
            # We ignore extra `ABCMeta` metaclass on stubs, this might be typing hack.
            # We also ignore `builtins.type` metaclass as an implementation detail in mypy.
            and not mypy.types.is_named_instance(
                stub.metaclass_type, ("abc.ABCMeta", "builtins.type")
            )
        ):
            # This means that our stub has a metaclass that is not present at runtime.
            yield Error(
                object_path,
                "metaclass mismatch",
                stub,
                runtime,
                stub_desc=f"{stub.metaclass_type.type.fullname}",
                runtime_desc="N/A",
            )
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
    stub: nodes.TypeInfo,
    runtime: MaybeMissing[type[Any]],
    object_path: list[str],
    *,
    is_alias_target: bool = False,
) -> Iterator[Error]:
    """Verify a class: finality, disjoint-base, metaclass, then each member."""
    if stub.is_type_check_only and not is_alias_target:
        # This type only exists in stubs, we only check that the runtime part
        # is missing. Other checks are not required.
        if not isinstance(runtime, Missing):
            yield Error(
                object_path,
                'is marked as "@type_check_only", but also exists at runtime',
                stub,
                runtime,
                stub_desc=repr(stub),
            )
        return

    if isinstance(runtime, Missing):
        yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
        return
    if not isinstance(runtime, type):
        # Yes, some runtime objects can be not types, no way to tell mypy about that.
        yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))  # type: ignore[unreachable]
        return

    yield from _verify_final(stub, runtime, object_path)
    yield from _verify_disjoint_base(stub, runtime, object_path)
    is_runtime_typeddict = stub.typeddict_type is not None and is_typeddict(runtime)
    yield from _verify_metaclass(
        stub, runtime, object_path, is_runtime_typeddict=is_runtime_typeddict
    )

    # Check everything already defined on the stub class itself (i.e. not inherited)
    #
    # Filter out non-identifier names, as these are (hopefully always?) whacky/fictional things
    # (like __mypy-replace or __mypy-post_init, etc.) that don't exist at runtime,
    # and exist purely for internal mypy reasons
    to_check = {name for name in stub.names if name.isidentifier()}
    # Check all public things on the runtime class
    to_check.update(
        m for m in vars(runtime) if not is_probably_private(m) and m not in IGNORABLE_CLASS_DUNDERS
    )
    # Special-case the __init__ method for Protocols and the __new__ method for TypedDicts
    #
    # TODO: On Python <3.11, __init__ methods on Protocol classes
    # are silently discarded and replaced.
    # However, this is not the case on Python 3.11+.
    # Ideally, we'd figure out a good way of validating Protocol __init__ methods on 3.11+.
    if stub.is_protocol:
        to_check.discard("__init__")
    if is_runtime_typeddict:
        to_check.discard("__new__")

    for entry in sorted(to_check):
        # Apply Python's private-name mangling before the runtime lookup.
        mangled_entry = entry
        if entry.startswith("__") and not entry.endswith("__"):
            mangled_entry = f"_{stub.name.lstrip('_')}{entry}"

        stub_to_verify = next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING)
        assert stub_to_verify is not None
        try:
            try:
                runtime_attr = getattr(runtime, mangled_entry)
            except AttributeError:
                runtime_attr = inspect.getattr_static(runtime, mangled_entry, MISSING)
        except Exception:
            # Catch all exceptions in case the runtime raises an unexpected exception
            # from __getattr__ or similar.
            continue

        # If it came from the metaclass, consider the runtime_attr to be MISSING
        # for a more accurate message
        if (
            runtime_attr is not MISSING
            and type(runtime) is not runtime
            and getattr(runtime_attr, "__objclass__", None) is type(runtime)
        ):
            runtime_attr = MISSING

        # __setattr__ and __delattr__ on object are a special case,
        # so if we only have these methods inherited from there, pretend that
        # we don't have them. See python/typeshed#7385.
        if (
            entry in ("__setattr__", "__delattr__")
            and runtime_attr is not MISSING
            and runtime is not object
            and getattr(runtime_attr, "__objclass__", None) is object
        ):
            runtime_attr = MISSING

        # Do not error for an object missing from the stub
        # If the runtime object is a types.WrapperDescriptorType object
        # and has a non-special dunder name.
        # The vast majority of these are false positives.
        if not (
            isinstance(stub_to_verify, Missing)
            and isinstance(runtime_attr, types.WrapperDescriptorType)
            and is_dunder(mangled_entry, exclude_special=True)
        ):
            yield from verify(stub_to_verify, runtime_attr, object_path + [entry])
def _static_lookup_runtime(object_path: list[str]) -> MaybeMissing[Any]:
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in several places.
return MISSING
return static_runtime
def _verify_static_class_methods(
    stub: nodes.FuncBase, runtime: Any, static_runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[str]:
    """Yield mismatch messages for classmethod/staticmethod status."""
    if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
        # Special cased by Python, so don't bother checking
        return
    if inspect.isbuiltin(runtime):
        # The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
        # something a little hacky that seems to work well
        probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
        if probably_class_method and not stub.is_class:
            yield "runtime is a classmethod but stub is not"
        if not probably_class_method and stub.is_class:
            yield "stub is a classmethod but runtime is not"
        return

    if static_runtime is MISSING:
        return

    # static_runtime was looked up without descriptor resolution, so the raw
    # classmethod/staticmethod wrappers are still visible here.
    if isinstance(static_runtime, classmethod) and not stub.is_class:
        yield "runtime is a classmethod but stub is not"
    if not isinstance(static_runtime, classmethod) and stub.is_class:
        yield "stub is a classmethod but runtime is not"
    if isinstance(static_runtime, staticmethod) and not stub.is_static:
        yield "runtime is a staticmethod but stub is not"
    if not isinstance(static_runtime, staticmethod) and stub.is_static:
        yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
    stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
    """Checks whether argument names match."""
    # Ignore exact names for most dunder methods
    if is_dunder(function_name, exclude_special=True):
        return

    # Stubs may use a leading-double-underscore convention for positional-only.
    if (
        stub_arg.variable.name == runtime_arg.name
        or stub_arg.variable.name.removeprefix("__") == runtime_arg.name
    ):
        return

    nonspecific_names = {"object", "args"}
    if runtime_arg.name in nonspecific_names:
        return

    def names_approx_match(a: str, b: str) -> bool:
        a = a.strip("_")
        b = b.strip("_")
        return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1

    # Be more permissive about names matching for positional-only arguments
    if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
        stub_arg.variable.name, runtime_arg.name
    ):
        return
    # This comes up with namedtuples, so ignore
    if stub_arg.variable.name == "_self":
        return
    yield (
        f'stub parameter "{stub_arg.variable.name}" '
        f'differs from runtime parameter "{runtime_arg.name}"'
    )
def _verify_arg_default_value(
    stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
    """Checks whether argument default values are compatible."""
    if runtime_arg.default is not inspect.Parameter.empty:
        if stub_arg.kind.is_required():
            yield (
                f'runtime parameter "{runtime_arg.name}" '
                "has a default value but stub parameter does not"
            )
        else:
            # Check the runtime default's type against the stub's declared type.
            type_context = stub_arg.variable.type
            runtime_type = get_mypy_type_of_runtime_value(
                runtime_arg.default, type_context=type_context
            )

            # Fallback to the type annotation type if var type is missing. The type annotation
            # is an UnboundType, but I don't know enough to know what the pros and cons here are.
            # UnboundTypes have ugly question marks following them, so default to var type.
            # Note we do this same fallback when constructing signatures in from_overloadedfuncdef
            stub_type = stub_arg.variable.type or stub_arg.type_annotation
            if isinstance(stub_type, mypy.types.TypeVarType):
                stub_type = stub_type.upper_bound
            if (
                runtime_type is not None
                and stub_type is not None
                # Avoid false positives for marker objects
                and type(runtime_arg.default) is not object
                # And ellipsis
                and runtime_arg.default is not ...
                and not is_subtype_helper(runtime_type, stub_type)
            ):
                yield (
                    f'runtime parameter "{runtime_arg.name}" '
                    f"has a default value of type {runtime_type}, "
                    f"which is incompatible with stub parameter type {stub_type}"
                )

            if stub_arg.initializer is not None:
                # Compare the concrete default values as well.
                stub_default = evaluate_expression(stub_arg.initializer)
                if (
                    stub_default is not UNKNOWN
                    and stub_default is not ...
                    and runtime_arg.default is not UNREPRESENTABLE
                ):
                    defaults_match = True
                    # We want the types to match exactly, e.g. in case the stub has
                    # True and the runtime has 1 (or vice versa).
                    if type(stub_default) is not type(runtime_arg.default):
                        defaults_match = False
                    else:
                        try:
                            defaults_match = bool(stub_default == runtime_arg.default)
                        except Exception:
                            # Exception can be raised in bool dunder method (e.g. numpy arrays)
                            # At this point, consider the default to be different, it is probably
                            # too complex to put in a stub anyway.
                            defaults_match = False
                    if not defaults_match:
                        yield (
                            f'runtime parameter "{runtime_arg.name}" '
                            f"has a default value of {runtime_arg.default!r}, "
                            f"which is different from stub parameter default {stub_default!r}"
                        )
    else:
        if stub_arg.kind.is_optional():
            yield (
                f'stub parameter "{stub_arg.variable.name}" has a default value '
                f"but runtime parameter does not"
            )
def maybe_strip_cls(name: str, args: list[nodes.Argument]) -> list[nodes.Argument]:
    """Drop a leading ``cls`` parameter for implicitly-classmethod dunders.

    ``__init_subclass__`` and ``__class_getitem__`` are classmethods even
    without the decorator, so if the stub chooses not to use @classmethod the
    ``cls`` argument should be removed before signatures are compared.
    """
    implicit_classmethods = ("__init_subclass__", "__class_getitem__")
    if name in implicit_classmethods and args and args[0].variable.name == "cls":
        return args[1:]
    return args
| Error |
python | xlwings__xlwings | xlwings/pro/_xlcalamine.py | {
"start": 5957,
"end": 6787
} | class ____(base_classes.Sheets):
    def __init__(self, book):
        # Parent Book object; all sheet information is read from book.api.
        self.book = book
    @property
    def api(self):
        # No native sheets-collection object exists for this backend.
        return None
def __call__(self, name_or_index):
if isinstance(name_or_index, str):
sheet_names = self.book.api["sheet_names"]
if name_or_index not in sheet_names:
raise NoSuchObjectError(f"Sheet {name_or_index} doesn't exist.")
else:
ix = self.book.api["sheet_names"].index(name_or_index) + 1
else:
ix = name_or_index
return Sheet(book=self.book, sheet_index=ix)
    def __len__(self):
        # Number of worksheets in the workbook.
        return len(self.book.api["sheet_names"])
def __iter__(self):
for ix, sheet in enumerate(self.book.api["sheet_names"]):
yield Sheet(book=self.book, sheet_index=ix + 1)
| Sheets |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_mask.py | {
"start": 244,
"end": 4898
} | class ____:
    def test_mask(self):
        """mask(cond) is the complement of where(cond): NaN where cond holds."""
        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
        cond = df > 0

        rs = df.where(cond, np.nan)
        tm.assert_frame_equal(rs, df.mask(df <= 0))
        tm.assert_frame_equal(rs, df.mask(~cond))

        # Same equivalence when an explicit `other` replacement is given.
        other = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
        rs = df.where(cond, other)
        tm.assert_frame_equal(rs, df.mask(df <= 0, other))
        tm.assert_frame_equal(rs, df.mask(~cond, other))
    def test_mask2(self):
        # see GH#21891
        # A plain list-of-lists boolean condition is accepted by mask.
        df = DataFrame([1, 2])
        res = df.mask([[True], [False]])

        exp = DataFrame([np.nan, 2])
        tm.assert_frame_equal(res, exp)
    def test_mask_inplace(self):
        # GH#8801
        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
        cond = df > 0

        rdf = df.copy()
        # inplace=True mutates rdf; the call returns the mutated object itself.
        result = rdf.where(cond, inplace=True)
        assert result is rdf
        tm.assert_frame_equal(rdf, df.where(cond))
        tm.assert_frame_equal(rdf, df.mask(~cond))

        rdf = df.copy()
        result = rdf.where(cond, -df, inplace=True)
        assert result is rdf
        tm.assert_frame_equal(rdf, df.where(cond, -df))
        tm.assert_frame_equal(rdf, df.mask(~cond, -df))
    def test_mask_edge_case_1xN_frame(self):
        # GH#4071
        # A 1xN frame masked with a 1xN boolean frame must align elementwise.
        df = DataFrame([[1, 2]])
        res = df.mask(DataFrame([[True, False]]))
        expec = DataFrame([[np.nan, 2]])
        tm.assert_frame_equal(res, expec)
    def test_mask_callable(self):
        # GH#12533
        # Both `cond` and `other` may be callables applied to the frame.
        df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        result = df.mask(lambda x: x > 4, lambda x: x + 1)
        exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
        tm.assert_frame_equal(result, exp)
        tm.assert_frame_equal(result, df.mask(df > 4, df + 1))

        # return ndarray and scalar
        result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
        exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
        tm.assert_frame_equal(result, exp)
        tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))

        # chain
        result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
        exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
        tm.assert_frame_equal(result, exp)
        tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10))
    def test_mask_dtype_bool_conversion(self):
        # GH#3733
        df = DataFrame(data=np.random.default_rng(2).standard_normal((100, 50)))
        df = df.where(df > 0)  # create nans
        bools = df > 0
        mask = isna(df)
        # Masking a bool frame with NaN upcasts it to object dtype.
        expected = bools.astype(object).mask(mask)
        result = bools.mask(mask)
        tm.assert_frame_equal(result, expected)
def test_mask_stringdtype(frame_or_series):
# GH 40824
obj = DataFrame(
{"A": ["foo", "bar", "baz", NA]},
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
filtered_obj = DataFrame(
{"A": ["this", "that"]}, index=["id2", "id3"], dtype=StringDtype()
)
expected = DataFrame(
{"A": ["foo", "this", "that", NA]},
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
if frame_or_series is Series:
obj = obj["A"]
filtered_obj = filtered_obj["A"]
expected = expected["A"]
filter_ser = Series(
[False, True, True, False],
index=["id1", "id2", "id3", "id4"],
)
result = obj.mask(filter_ser, filtered_obj)
tm.assert_equal(result, expected)
def test_mask_where_dtype_timedelta():
    # https://github.com/pandas-dev/pandas/issues/39548
    """mask/where on timedelta64 frames produce NaT-backed results."""
    df = DataFrame([Timedelta(i, unit="D") for i in range(5)])

    # Masking every valid entry leaves an all-NaT timedelta column.
    all_nat = DataFrame(np.full(5, np.nan, dtype="timedelta64[ns]"))
    tm.assert_frame_equal(df.mask(df.notna()), all_nat)

    partially_kept = DataFrame(
        [np.nan, np.nan, np.nan, Timedelta("3 day"), Timedelta("4 day")]
    )
    tm.assert_frame_equal(df.where(df > Timedelta(2, unit="D")), partially_kept)
def test_mask_return_dtype():
    # GH#50488
    """mask keeps the caller's extension dtype, not the dtype of `other`."""
    ser = Series([0.0, 1.0, 2.0, 3.0], dtype=Float64Dtype())
    cond = ~ser.isna()
    other = Series([True, False, True, False])
    expected = Series([1.0, 0.0, 1.0, 0.0], dtype=ser.dtype)

    result = ser.mask(cond, other)
    tm.assert_series_equal(result, expected)
def test_mask_inplace_no_other():
    # GH#51685
    """Inplace mask with no `other` fills the selected cells with NaN."""
    frame = DataFrame({"a": [1.0, 2.0], "b": ["x", "y"]})
    to_mask = DataFrame({"a": [True, False], "b": [False, True]})
    frame.mask(to_mask, inplace=True)
    tm.assert_frame_equal(
        frame, DataFrame({"a": [np.nan, 2], "b": ["x", np.nan]})
    )
| TestDataFrameMask |
python | ray-project__ray | python/ray/serve/_private/benchmarks/streaming/streaming_grpc_throughput.py | {
"start": 483,
"end": 1418
} | class ____:
async def __init__(self, tokens_per_request, socket_type, tempdir):
# Switch off logging to minimize its impact
logging.getLogger("ray").setLevel(logging.WARNING)
logging.getLogger("ray.serve").setLevel(logging.WARNING)
self.server = await self.start_server(tokens_per_request, socket_type, tempdir)
print("gRPC server started!")
@staticmethod
async def start_server(tokens_per_request, socket_type, tempdir):
server = grpc.aio.server(futures.ThreadPoolExecutor(max_workers=1))
addr, server_creds, _ = _gen_addr_creds(socket_type, tempdir)
server.add_secure_port(addr, server_creds)
await server.start()
test_server_pb2_grpc.add_GRPCTestServerServicer_to_server(
TestGRPCServer(tokens_per_request), server
)
return server
# @ray.remote(runtime_env=GRPC_DEBUG_RUNTIME_ENV)
@ray.remote
| EndpointActor |
python | getsentry__sentry | src/sentry/core/endpoints/organization_auditlogs.py | {
"start": 1472,
"end": 3067
} | class ____(ControlSiloOrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ENTERPRISE
permission_classes = (OrganizationAuditPermission,)
def get(
self,
request: Request,
organization_context: RpcUserOrganizationContext,
organization: RpcOrganization,
) -> Response:
queryset = AuditLogEntry.objects.filter(organization_id=organization.id).select_related(
"actor"
)
serializer = AuditLogQueryParamSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
query = serializer.validated_data
if "actor" in query:
queryset = queryset.filter(actor=query["actor"])
if "event" in query:
if query.get("event") is None:
queryset = queryset.none()
else:
queryset = queryset.filter(event=query["event"])
# Handle date filtering
start, end = get_date_range_from_stats_period(request.GET, optional=True)
if start and end:
queryset = queryset.filter(datetime__range=(start, end))
response = self.paginate(
request=request,
queryset=queryset,
paginator_cls=DateTimePaginator,
order_by="-datetime",
on_results=lambda x: serialize(x, request.user),
)
response.data = {"rows": response.data, "options": audit_log.get_api_names()}
return response
| OrganizationAuditLogsEndpoint |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 27864,
"end": 29251
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
import sklearn.naive_bayes
if type(y) in ("binary", "multiclass"):
kf = sklearn.model_selection.StratifiedKFold(n_splits=5)
else:
kf = sklearn.model_selection.KFold(n_splits=5)
accuracy = 0.0
for train, test in kf.split(X, y):
nb = sklearn.naive_bayes.GaussianNB()
if len(y.shape) == 1 or y.shape[1] == 1:
nb.fit(
X.iloc[train] if hasattr(X, "iloc") else X[train],
y.iloc[train] if hasattr(y, "iloc") else y[train],
)
else:
nb = OneVsRestClassifier(nb)
nb.fit(
X.iloc[train] if hasattr(X, "iloc") else X[train],
y.iloc[train] if hasattr(y, "iloc") else y[train],
)
predictions = nb.predict(
X.iloc[test] if hasattr(X, "iloc") else X[test],
)
accuracy += sklearn.metrics.accuracy_score(
predictions,
y.iloc[test] if hasattr(y, "iloc") else y[test],
)
return accuracy / 5
def _calculate_sparse(self, X, y, logger, feat_type):
return np.NaN
# Cart learner instead of C5.0
@metafeatures.define("LandmarkDecisionTree")
| LandmarkNaiveBayes |
python | celery__celery | celery/utils/time.py | {
"start": 1834,
"end": 3798
} | class ____(tzinfo):
"""Local time implementation. Provided in _Zone to the app when `enable_utc` is disabled.
Otherwise, _Zone provides a UTC ZoneInfo instance as the timezone implementation for the application.
Note:
Used only when the :setting:`enable_utc` setting is disabled.
"""
_offset_cache: dict[int, tzinfo] = {}
def __init__(self) -> None:
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
super().__init__()
def __repr__(self) -> str:
return f'<LocalTimezone: UTC{int(self.DSTOFFSET.total_seconds() / 3600):+03d}>'
def utcoffset(self, dt: datetime) -> timedelta:
return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET
def dst(self, dt: datetime) -> timedelta:
return self.DSTDIFF if self._isdst(dt) else ZERO
def tzname(self, dt: datetime) -> str:
return _time.tzname[self._isdst(dt)]
def fromutc(self, dt: datetime) -> datetime:
# The base tzinfo class no longer implements a DST
# offset aware .fromutc() in Python 3 (Issue #2306).
offset = int(self.utcoffset(dt).seconds / 60.0)
try:
tz = self._offset_cache[offset]
except KeyError:
tz = self._offset_cache[offset] = datetime_timezone(
timedelta(minutes=offset))
return tz.fromutc(dt.replace(tzinfo=tz))
def _isdst(self, dt: datetime) -> bool:
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
| LocalTimezone |
python | encode__django-rest-framework | tests/test_serializer_lists.py | {
"start": 2017,
"end": 7192
} | class ____:
"""
Tests for using a ListSerializer containing another serializer.
"""
def setup_method(self):
class TestSerializer(serializers.Serializer):
integer = serializers.IntegerField()
boolean = serializers.BooleanField()
def create(self, validated_data):
return BasicObject(**validated_data)
class ObjectListSerializer(serializers.ListSerializer):
child = TestSerializer()
self.Serializer = ObjectListSerializer
def test_validate(self):
"""
Validating a list of dictionaries should return a list of
validated dictionaries.
"""
input_data = [
{"integer": "123", "boolean": "true"},
{"integer": "456", "boolean": "false"}
]
expected_output = [
{"integer": 123, "boolean": True},
{"integer": 456, "boolean": False}
]
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert serializer.validated_data == expected_output
def test_create(self):
"""
Creating from a list of dictionaries should return a list of objects.
"""
input_data = [
{"integer": "123", "boolean": "true"},
{"integer": "456", "boolean": "false"}
]
expected_output = [
BasicObject(integer=123, boolean=True),
BasicObject(integer=456, boolean=False),
]
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert serializer.save() == expected_output
def test_serialize(self):
"""
Serialization of a list of objects should return a list of dictionaries.
"""
input_objects = [
BasicObject(integer=123, boolean=True),
BasicObject(integer=456, boolean=False)
]
expected_output = [
{"integer": 123, "boolean": True},
{"integer": 456, "boolean": False}
]
serializer = self.Serializer(input_objects)
assert serializer.data == expected_output
def test_validate_html_input(self):
"""
HTML input should be able to mock list structures using [x]
style prefixes.
"""
input_data = MultiValueDict({
"[0]integer": ["123"],
"[0]boolean": ["true"],
"[1]integer": ["456"],
"[1]boolean": ["false"]
})
expected_output = [
{"integer": 123, "boolean": True},
{"integer": 456, "boolean": False}
]
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert serializer.validated_data == expected_output
def test_update_allow_custom_child_validation(self):
"""
Update a list of objects thanks custom run_child_validation implementation.
"""
class TestUpdateSerializer(serializers.Serializer):
integer = serializers.IntegerField()
boolean = serializers.BooleanField()
def update(self, instance, validated_data):
instance._data.update(validated_data)
return instance
def validate(self, data):
# self.instance is set to current BasicObject instance
assert isinstance(self.instance, BasicObject)
# self.initial_data is current dictionary
assert isinstance(self.initial_data, dict)
assert self.initial_data["pk"] == self.instance.pk
return super().validate(data)
class ListUpdateSerializer(serializers.ListSerializer):
child = TestUpdateSerializer()
def run_child_validation(self, data):
# find related instance in self.instance list
child_instance = next(o for o in self.instance if o.pk == data["pk"])
# set instance and initial_data for child serializer
self.child.instance = child_instance
self.child.initial_data = data
return super().run_child_validation(data)
def update(self, instance, validated_data):
return [
self.child.update(instance, attrs)
for instance, attrs in zip(self.instance, validated_data)
]
instance = [
BasicObject(pk=1, integer=11, private_field="a"),
BasicObject(pk=2, integer=22, private_field="b"),
]
input_data = [
{"pk": 1, "integer": "123", "boolean": "true"},
{"pk": 2, "integer": "456", "boolean": "false"},
]
expected_output = [
BasicObject(pk=1, integer=123, boolean=True, private_field="a"),
BasicObject(pk=2, integer=456, boolean=False, private_field="b"),
]
serializer = ListUpdateSerializer(instance, data=input_data)
assert serializer.is_valid()
updated_instances = serializer.save()
assert updated_instances == expected_output
| TestListSerializerContainingNestedSerializer |
python | scrapy__scrapy | scrapy/shell.py | {
"start": 1016,
"end": 8887
} | class ____:
relevant_classes: tuple[type, ...] = (Crawler, Spider, Request, Response, Settings)
def __init__(
self,
crawler: Crawler,
update_vars: Callable[[dict[str, Any]], None] | None = None,
code: str | None = None,
):
self.crawler: Crawler = crawler
self.update_vars: Callable[[dict[str, Any]], None] = update_vars or (
lambda x: None
)
self.item_class: type = load_object(crawler.settings["DEFAULT_ITEM_CLASS"])
self.spider: Spider | None = None
self.inthread: bool = not threadable.isInIOThread()
self.code: str | None = code
self.vars: dict[str, Any] = {}
def start(
self,
url: str | None = None,
request: Request | None = None,
response: Response | None = None,
spider: Spider | None = None,
redirect: bool = True,
) -> None:
# disable accidental Ctrl-C key press from shutting down the engine
signal.signal(signal.SIGINT, signal.SIG_IGN)
if url:
self.fetch(url, spider, redirect=redirect)
elif request:
self.fetch(request, spider)
elif response:
request = response.request
self.populate_vars(response, request, spider)
else:
self.populate_vars()
if self.code:
# pylint: disable-next=eval-used
print(eval(self.code, globals(), self.vars)) # noqa: S307
else:
# Detect interactive shell setting in scrapy.cfg
# e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg
# [settings]
# # shell can be one of ipython, bpython or python;
# # to be used as the interactive python console, if available.
# # (default is ipython, fallbacks in the order listed above)
# shell = python
cfg = get_config()
section, option = "settings", "shell"
env = os.environ.get("SCRAPY_PYTHON_SHELL")
shells = []
if env:
shells += env.strip().lower().split(",")
elif cfg.has_option(section, option):
shells += [cfg.get(section, option).strip().lower()]
else: # try all by default
shells += DEFAULT_PYTHON_SHELLS.keys()
# always add standard shell as fallback
shells += ["python"]
start_python_console(
self.vars, shells=shells, banner=self.vars.pop("banner", "")
)
def _schedule(self, request: Request, spider: Spider | None) -> defer.Deferred[Any]:
if is_asyncio_reactor_installed():
# set the asyncio event loop for the current thread
event_loop_path = self.crawler.settings["ASYNCIO_EVENT_LOOP"]
set_asyncio_event_loop(event_loop_path)
def crawl_request(_):
assert self.crawler.engine is not None
self.crawler.engine.crawl(request)
d2 = self._open_spider(request, spider)
d2.addCallback(crawl_request)
d = _request_deferred(request)
d.addCallback(lambda x: (x, spider))
return d
@deferred_f_from_coro_f
async def _open_spider(self, request: Request, spider: Spider | None) -> None:
if self.spider:
return
if spider is None:
spider = self.crawler.spider or self.crawler._create_spider()
self.crawler.spider = spider
assert self.crawler.engine
await self.crawler.engine.open_spider_async(close_if_idle=False)
_schedule_coro(self.crawler.engine._start_request_processing())
self.spider = spider
def fetch(
self,
request_or_url: Request | str,
spider: Spider | None = None,
redirect: bool = True,
**kwargs: Any,
) -> None:
from twisted.internet import reactor
if isinstance(request_or_url, Request):
request = request_or_url
else:
url = any_to_uri(request_or_url)
request = Request(url, dont_filter=True, **kwargs)
if redirect:
request.meta["handle_httpstatus_list"] = SequenceExclude(
range(300, 400)
)
else:
request.meta["handle_httpstatus_all"] = True
response = None
with contextlib.suppress(IgnoreRequest):
response, spider = threads.blockingCallFromThread(
reactor, self._schedule, request, spider
)
self.populate_vars(response, request, spider)
def populate_vars(
self,
response: Response | None = None,
request: Request | None = None,
spider: Spider | None = None,
) -> None:
self.vars["scrapy"] = scrapy
self.vars["crawler"] = self.crawler
self.vars["item"] = self.item_class()
self.vars["settings"] = self.crawler.settings
self.vars["spider"] = spider
self.vars["request"] = request
self.vars["response"] = response
if self.inthread:
self.vars["fetch"] = self.fetch
self.vars["view"] = open_in_browser
self.vars["shelp"] = self.print_help
self.update_vars(self.vars)
if not self.code:
self.vars["banner"] = self.get_help()
def print_help(self) -> None:
print(self.get_help())
def get_help(self) -> str:
b = []
b.append("Available Scrapy objects:")
b.append(
" scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc)"
)
for k, v in sorted(self.vars.items()):
if self._is_relevant(v):
b.append(f" {k:<10} {v}")
b.append("Useful shortcuts:")
if self.inthread:
b.append(
" fetch(url[, redirect=True]) "
"Fetch URL and update local objects (by default, redirects are followed)"
)
b.append(
" fetch(req) "
"Fetch a scrapy.Request and update local objects "
)
b.append(" shelp() Shell help (print this help)")
b.append(" view(response) View response in a browser")
return "\n".join(f"[s] {line}" for line in b) + "\n"
def _is_relevant(self, value: Any) -> bool:
return isinstance(value, self.relevant_classes) or is_item(value)
def inspect_response(response: Response, spider: Spider) -> None:
"""Open a shell to inspect the given response"""
# Shell.start removes the SIGINT handler, so save it and re-add it after
# the shell has closed
sigint_handler = signal.getsignal(signal.SIGINT)
Shell(spider.crawler).start(response=response, spider=spider)
signal.signal(signal.SIGINT, sigint_handler)
def _request_deferred(request: Request) -> defer.Deferred[Any]:
"""Wrap a request inside a Deferred.
This function is harmful, do not use it until you know what you are doing.
This returns a Deferred whose first pair of callbacks are the request
callback and errback. The Deferred also triggers when the request
callback/errback is executed (i.e. when the request is downloaded)
WARNING: Do not call request.replace() until after the deferred is called.
"""
request_callback = request.callback
request_errback = request.errback
def _restore_callbacks(result: Any) -> Any:
request.callback = request_callback
request.errback = request_errback
return result
d: defer.Deferred[Any] = defer.Deferred()
d.addBoth(_restore_callbacks)
if request.callback:
d.addCallback(request.callback)
if request.errback:
d.addErrback(request.errback)
request.callback, request.errback = d.callback, d.errback
return d
| Shell |
python | OmkarPathak__pygorithm | tests/test_dynamic_programming.py | {
"start": 652,
"end": 1020
} | class ____(unittest.TestCase):
def test_min_cost_path(self):
matrix = [[5, 3, 10, 17, 1],
[4, 2, 9, 8, 5],
[11, 12, 3, 9, 6],
[1, 3, 4, 2, 10],
[7, 11, 13, 7, 3]]
self.assertEqual(min_cost_path.find_path(matrix), 38)
if __name__ == '__main__':
unittest.main()
| TestMinCostPath |
python | google__pytype | pytype/pyi/parser_test_base.py | {
"start": 188,
"end": 2442
} | class ____(test_base.UnitTest):
"""Base class for pyi parsing tests."""
def setUp(self):
super().setUp()
self.options = parser.PyiOptions(python_version=self.python_version)
def parse(self, src, name=None, version=None, platform="linux"):
if version:
self.options.python_version = version
self.options.platform = platform
version = version or self.python_version
src = textwrap.dedent(src).lstrip()
ast = parser.parse_string(src, name=name, options=self.options)
return ast
def check(
self,
src,
expected=None,
prologue=None,
name=None,
version=None,
platform="linux",
):
"""Check the parsing of src.
This checks that parsing the source and then printing the resulting
AST results in the expected text.
Args:
src: A source string.
expected: Optional expected result string. If not provided, src is used
instead. The special value IGNORE can be used to skip checking the
parsed results against expected text.
prologue: An optional prologue to be prepended to the expected text before
comparison. Useful for imports that are introduced during printing the
AST.
name: The name of the module.
version: A python version tuple (None for default value).
platform: A platform string (defaults to "linux").
Returns:
The parsed pytd.TypeDeclUnit.
"""
ast = self.parse(src, name, version, platform)
actual = pytd_utils.Print(ast)
if expected != IGNORE:
if expected is None:
expected = src
expected = textwrap.dedent(expected).lstrip()
if prologue:
expected = f"{textwrap.dedent(prologue)}\n\n{expected}"
# Allow blank lines at the end of `expected` for prettier tests.
self.assertMultiLineEqual(expected.rstrip(), actual)
return ast
def check_error(self, src, expected_line, message):
"""Check that parsing the src raises the expected error."""
with self.assertRaises(parser.ParseError) as e:
parser.parse_string(textwrap.dedent(src).lstrip(), options=self.options)
self.assertRegex(str(e.exception), re.escape(message))
self.assertEqual(expected_line, e.exception.line)
| ParserTestBase |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/job_snapshot.py | {
"start": 15926,
"end": 16217
} | class ____:
parent_snapshot_id: str
op_selection: Optional[Sequence[str]] = None
resolved_op_selection: Optional[AbstractSet[str]] = None
asset_selection: Optional[AbstractSet[AssetKey]] = None
asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None
| JobLineageSnap |
python | readthedocs__readthedocs.org | readthedocs/integrations/migrations/0004_add_integration_secret.py | {
"start": 150,
"end": 696
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("integrations", "0003_add_missing_model_change_migrations"),
]
operations = [
migrations.AddField(
model_name="integration",
name="secret",
field=models.CharField(
blank=True,
default=None,
help_text="Secret used to validate the payload of the webhook",
max_length=255,
null=True,
),
),
]
| Migration |
python | boto__boto3 | tests/unit/s3/test_transfer.py | {
"start": 9032,
"end": 9297
} | class ____(unittest.TestCase):
def test_on_progress(self):
callback = mock.Mock()
subscriber = ProgressCallbackInvoker(callback)
subscriber.on_progress(bytes_transferred=1)
callback.assert_called_with(1)
| TestProgressCallbackInvoker |
python | mlflow__mlflow | mlflow/tensorflow/callback.py | {
"start": 3966,
"end": 9076
} | class ____(Callback, MlflowModelCheckpointCallbackBase):
"""Callback for automatic Keras model checkpointing to MLflow.
Args:
monitor: In automatic model checkpointing, the metric name to monitor if
you set `model_checkpoint_save_best_only` to True.
save_best_only: If True, automatic model checkpointing only saves when
the model is considered the "best" model according to the quantity
monitored and previous checkpoint model is overwritten.
mode: one of {"min", "max"}. In automatic model checkpointing,
if save_best_only=True, the decision to overwrite the current save file is made
based on either the maximization or the minimization of the monitored quantity.
save_weights_only: In automatic model checkpointing, if True, then
only the model's weights will be saved. Otherwise, the optimizer states,
lr-scheduler states, etc are added in the checkpoint too.
save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. Note that if the saving isn't
aligned to epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
.. code-block:: python
:caption: Example
from tensorflow import keras
import tensorflow as tf
import mlflow
import numpy as np
from mlflow.tensorflow import MlflowModelCheckpointCallback
# Prepare data for a 2-class classification.
data = tf.random.uniform([8, 28, 28, 3])
label = tf.convert_to_tensor(np.random.randint(2, size=8))
model = keras.Sequential(
[
keras.Input([28, 28, 3]),
keras.layers.Flatten(),
keras.layers.Dense(2),
]
)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(0.001),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
mlflow_checkpoint_callback = MlflowModelCheckpointCallback(
monitor="sparse_categorical_accuracy",
mode="max",
save_best_only=True,
save_weights_only=False,
save_freq="epoch",
)
with mlflow.start_run() as run:
model.fit(
data,
label,
batch_size=4,
epochs=2,
callbacks=[mlflow_checkpoint_callback],
)
"""
def __init__(
self,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=False,
save_freq="epoch",
):
Callback.__init__(self)
MlflowModelCheckpointCallbackBase.__init__(
self,
checkpoint_file_suffix=".h5",
monitor=monitor,
mode=mode,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
save_freq=save_freq,
)
self.trainer = None
self.current_epoch = None
self._last_batch_seen = 0
self.global_step = 0
self.global_step_last_saving = 0
def save_checkpoint(self, filepath: str):
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
def on_epoch_begin(self, epoch, logs=None):
self.current_epoch = epoch
def on_train_batch_end(self, batch, logs=None):
# Note that `on_train_batch_end` might be invoked by every N train steps,
# (controlled by `steps_per_execution` argument in `model.compile` method).
# the following logic is similar to
# https://github.com/keras-team/keras/blob/e6e62405fa1b4444102601636d871610d91e5783/keras/callbacks/model_checkpoint.py#L212
add_batches = batch + 1 if batch <= self._last_batch_seen else batch - self._last_batch_seen
self._last_batch_seen = batch
self.global_step += add_batches
if isinstance(self.save_freq, int):
if self.global_step - self.global_step_last_saving >= self.save_freq:
self.check_and_save_checkpoint_if_needed(
current_epoch=self.current_epoch,
global_step=self.global_step,
metric_dict={k: float(v) for k, v in logs.items()},
)
self.global_step_last_saving = self.global_step
def on_epoch_end(self, epoch, logs=None):
if self.save_freq == "epoch":
self.check_and_save_checkpoint_if_needed(
current_epoch=self.current_epoch,
global_step=self.global_step,
metric_dict={k: float(v) for k, v in logs.items()},
)
| MlflowModelCheckpointCallback |
python | huggingface__transformers | tests/models/cwm/test_configuration_cwm.py | {
"start": 4460,
"end": 4810
} | class ____(ConfigTester):
def __init__(self, parent, config_class=None, **kwargs):
super().__init__(parent, config_class=config_class, **kwargs)
def test_config(self):
config_class = CwmConfig
self.config_tester = ConfigTester(self, config_class=config_class)
self.config_tester.run_common_tests()
| CwmConfigTester |
python | python-poetry__poetry | src/poetry/console/logging/io_formatter.py | {
"start": 336,
"end": 2180
} | class ____(logging.Formatter):
_colors: ClassVar[dict[str, str]] = {
"error": "fg=red",
"warning": "fg=yellow",
"debug": "debug",
"info": "fg=blue",
}
def format(self, record: LogRecord) -> str:
if not record.exc_info:
level = record.levelname.lower()
msg = record.msg
if record.name in FORMATTERS:
msg = FORMATTERS[record.name].format(msg)
elif level in self._colors:
msg = f"<{self._colors[level]}>{msg}</>"
record.msg = msg
formatted = super().format(record)
if not POETRY_FILTER.filter(record):
# prefix all lines from third-party packages for easier debugging
formatted = textwrap.indent(
formatted, f"[{_log_prefix(record)}] ", lambda line: True
)
return formatted
def _log_prefix(record: LogRecord) -> str:
prefix = _path_to_package(Path(record.pathname)) or record.module
if record.name != "root":
prefix = ":".join([prefix, record.name])
return prefix
def _path_to_package(path: Path) -> str | None:
"""Return main package name from the LogRecord.pathname."""
prefix: Path | None = None
# Find the most specific prefix in sys.path.
# We have to search the entire sys.path because a subsequent path might be
# a sub path of the first match and thereby a better match.
for syspath in sys.path:
if (
prefix and prefix in (p := Path(syspath)).parents and p in path.parents
) or (not prefix and (p := Path(syspath)) in path.parents):
prefix = p
if not prefix:
# this is unexpected, but let's play it safe
return None
path = path.relative_to(prefix)
return path.parts[0] # main package name
| IOFormatter |
python | dask__distributed | distributed/scheduler.py | {
"start": 29158,
"end": 29439
} | class ____:
"""Lightweight representation of an erred task without any dependency information
or runspec.
See also
--------
TaskState
"""
key: Hashable
timestamp: float
erred_on: set[str]
exception_text: str
traceback_text: str
| ErredTask |
python | bokeh__bokeh | tests/unit/bokeh/plotting/test_figure.py | {
"start": 1623,
"end": 8471
} | class ____:
def test_init(self) -> None:
f0 = bpf.figure(x_axis_type="linear")
assert isinstance(f0, bpf.figure)
with pytest.raises(ValueError, match="linear"): # TODO: ValidationError
bpf.figure(x_axis_type="lnear")
with pytest.raises(AttributeError, match="x_axis_type"):
bpf.figure(x_axis_typ="linear")
def test_basic(self) -> None:
p = bpf.figure()
q = bpf.figure()
q.scatter([1, 2, 3], [1, 2, 3])
assert p != q
r = bpf.figure()
assert p != r
assert q != r
def test_width_height(self) -> None:
p = bpf.figure(width=100, height=120)
assert p.width == 100
assert p.height == 120
def test_xaxis(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.xaxis) == 1
expected = set(p.xaxis)
ax = LinearAxis()
expected.add(ax)
p.above.append(ax)
assert set(p.xaxis) == expected
ax2 = LinearAxis()
expected.add(ax2)
p.above.append(ax2)
assert set(p.xaxis) == expected
p.left.append(LinearAxis())
assert set(p.xaxis) == expected
p.right.append(LinearAxis())
assert set(p.xaxis) == expected
def test_yaxis(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.yaxis) == 1
expected = set(p.yaxis)
ax = LinearAxis()
expected.add(ax)
p.right.append(ax)
assert set(p.yaxis) == expected
ax2 = LinearAxis()
expected.add(ax2)
p.right.append(ax2)
assert set(p.yaxis) == expected
p.above.append(LinearAxis())
assert set(p.yaxis) == expected
p.below.append(LinearAxis())
assert set(p.yaxis) == expected
def test_axis(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.axis) == 2
expected = set(p.axis)
ax = LinearAxis()
expected.add(ax)
p.above.append(ax)
assert set(p.axis) == expected
ax2 = LinearAxis()
expected.add(ax2)
p.below.append(ax2)
assert set(p.axis) == expected
ax3 = LinearAxis()
expected.add(ax3)
p.left.append(ax3)
assert set(p.axis) == expected
ax4 = LinearAxis()
expected.add(ax4)
p.right.append(ax4)
assert set(p.axis) == expected
def test_log_axis(self) -> None:
p = bpf.figure(x_axis_type='log')
p.scatter([1, 2, 3], [1, 2, 3])
assert isinstance(p.x_scale, LogScale)
p = bpf.figure(y_axis_type='log')
p.scatter([1, 2, 3], [1, 2, 3])
assert isinstance(p.y_scale, LogScale)
def test_grid_tickers(self) -> None:
p = bpf.figure()
assert p.xgrid[0].axis == p.xaxis[0]
assert p.xgrid[0].ticker is None
assert p.ygrid[0].axis == p.yaxis[0]
assert p.ygrid[0].ticker is None
def test_xgrid(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.xgrid) == 1
assert p.xgrid[0].dimension == 0
def test_ygrid(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.ygrid) == 1
assert p.ygrid[0].dimension == 1
def test_grid(self) -> None:
p = bpf.figure()
p.scatter([1, 2, 3], [1, 2, 3])
assert len(p.grid) == 2
def test_tools(self) -> None:
TOOLS = "pan,box_zoom,reset,lasso_select"
fig = bpf.figure(tools=TOOLS)
expected = [PanTool, BoxZoomTool, ResetTool, LassoSelectTool]
assert len(fig.tools) == len(expected)
for i, _type in enumerate(expected):
assert isinstance(fig.tools[i], _type)
def test_plot_fill_props(self) -> None:
p = bpf.figure(background_fill_color='red',
background_fill_alpha=0.5,
border_fill_color='blue',
border_fill_alpha=0.8)
assert p.background_fill_color == 'red'
assert p.background_fill_alpha == 0.5
assert p.border_fill_color == 'blue'
assert p.border_fill_alpha == 0.8
p.background_fill_color = 'green'
p.border_fill_color = 'yellow'
assert p.background_fill_color == 'green'
assert p.border_fill_color == 'yellow'
def test_title_kwarg_no_warning(self, recwarn) -> None:
bpf.figure(title="title")
assert len(recwarn) == 0
def test_title_should_accept_Title(self) -> None:
title = Title(text='Great Title')
plot = bpf.figure(title=title)
plot.line([1, 2, 3], [1, 2, 3])
assert plot.title.text == 'Great Title'
def test_title_should_accept_string(self) -> None:
plot = bpf.figure(title='Great Title 2')
plot.line([1, 2, 3], [1, 2, 3])
assert plot.title.text == 'Great Title 2'
def test_columnsource_auto_conversion_from_dict(self) -> None:
p = bpf.figure()
dct = {'x': [1, 2, 3], 'y': [2, 3, 4]}
p.scatter(x='x', y='y', source=dct)
def test_columnsource_auto_conversion_from_pandas(self) -> None:
pd = pytest.importorskip("pandas")
p = bpf.figure()
df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 3, 4]})
p.scatter(x='x', y='y', source=df)
def test_glyph_method_errors_on_sequence_literals_with_source(self) -> None:
p = bpf.figure()
source = ColumnDataSource({'x': [1, 2, 3], 'y': [2, 3, 4]})
with pytest.raises(RuntimeError, match=r"Expected y to reference fields in the supplied data source."):
p.scatter(x='x', y=[1,2,3], source=source)
with pytest.raises(RuntimeError, match=r"Expected y and line_color to reference fields in the supplied data source."):
p.scatter(x='x', y=[1,2,3], line_color=["red", "green", "blue"], source=source)
with pytest.raises(RuntimeError) as e:
p.scatter(x='x', y=[1,2,3], color=["red", "green", "blue"], source=source)
m = re.search (r"Expected y, (.+), (.+) and (.+) to reference fields in the supplied data source.", str(e.value))
assert m is not None
assert set(m.groups()) == {"fill_color", "hatch_color", "line_color"}
def test_context_menu(self) -> None:
plot = bpf.figure()
assert plot.context_menu == "auto"
plot = bpf.figure(context_menu=None)
assert plot.context_menu is None
menu = Menu()
plot = bpf.figure()
plot.context_menu = menu
assert plot.context_menu == menu
menu = Menu()
plot = bpf.figure(context_menu=menu)
assert plot.context_menu == menu
NONCIRCLE_MARKERS = set(MarkerType) - {"circle"}
| Test_figure |
python | plotly__plotly.py | plotly/graph_objs/volume/colorbar/_tickfont.py | {
"start": 233,
"end": 9913
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.colorbar"
_path_str = "volume.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | walkccc__LeetCode | solutions/282. Expression Add Operators/282.py | {
"start": 0,
"end": 917
} | class ____:
def addOperators(self, num: str, target: int) -> list[str]:
ans = []
def dfs(start: int, prev: int, eval: int, path: list[str]) -> None:
if start == len(num):
if eval == target:
ans.append(''.join(path))
return
for i in range(start, len(num)):
if i > start and num[start] == '0':
return
s = num[start:i + 1]
curr = int(s)
if start == 0:
path.append(s)
dfs(i + 1, curr, curr, path)
path.pop()
else:
for op in ['+', '-', '*']:
path.append(op + s)
if op == '+':
dfs(i + 1, curr, eval + curr, path)
elif op == '-':
dfs(i + 1, -curr, eval - curr, path)
else:
dfs(i + 1, prev * curr, eval - prev + prev * curr, path)
path.pop()
dfs(0, 0, 0, [])
return ans
| Solution |
python | pytorch__pytorch | torch/export/_draft_export.py | {
"start": 7807,
"end": 9055
} | class ____:
def __init__(self) -> None:
self.log_count: dict[int, int] = {}
self.logs: list[tuple[str, dict[str, Any]]] = []
def _hash(self, element: tuple[str, dict[str, Any]]) -> int:
key, data = element
if key == "missing_fake_kernel":
return hash((key, data["op"]))
elif key == "mismatched_fake_kernel":
return hash((key, data["op"], data["reason"]))
elif key == "propagate_real_tensors_provenance":
return hash((key, json.dumps(data["user_stack"])))
elif key == "guard_added":
return hash((key, json.dumps(data["user_stack"])))
elif key == "create_unbacked_symbol":
return hash((key, json.dumps(data["user_stack"])))
return hash((key, json.dumps(data)))
def try_add(self, element: tuple[str, dict[str, str]]) -> bool:
hash_value = self._hash(element)
if hash_value in self.log_count:
self.log_count[hash_value] += 1
return False
self.log_count[hash_value] = 1
self.logs.append(element)
return True
def get_log_count(self, element: tuple[str, dict[str, Any]]) -> int:
return self.log_count[self._hash(element)]
| LogRecord |
python | huggingface__transformers | src/transformers/models/sam3_video/modeling_sam3_video.py | {
"start": 20341,
"end": 20832
} | class ____(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = Sam3VideoConfig
base_model_prefix = "sam3_video"
main_input_name = "pixel_values"
input_modalities = ["video", "text"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
@auto_docstring
| Sam3VideoPreTrainedModel |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 112013,
"end": 113845
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write(self.xsrf_token)
def post(self):
self.write("ok")
def get_app_kwargs(self):
return dict(
xsrf_cookies=True,
xsrf_cookie_name="__Host-xsrf",
xsrf_cookie_kwargs={"secure": True},
)
def setUp(self):
super().setUp()
self.xsrf_token = self.get_token()
def get_token(self, old_token=None):
if old_token is not None:
headers = self.cookie_headers(old_token)
else:
headers = None
response = self.fetch("/", headers=headers)
response.rethrow()
return native_str(response.body)
def cookie_headers(self, token=None):
if token is None:
token = self.xsrf_token
return {"Cookie": "__Host-xsrf=" + token}
def test_xsrf_fail_no_token(self):
with ExpectLog(gen_log, ".*'_xsrf' argument missing"):
response = self.fetch("/", method="POST", body=b"")
self.assertEqual(response.code, 403)
def test_xsrf_fail_body_no_cookie(self):
with ExpectLog(gen_log, ".*XSRF cookie does not match POST"):
response = self.fetch(
"/",
method="POST",
body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)),
)
self.assertEqual(response.code, 403)
def test_xsrf_success_post_body(self):
response = self.fetch(
"/",
method="POST",
# Note that renaming the cookie doesn't rename the POST param
body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)),
headers=self.cookie_headers(),
)
self.assertEqual(response.code, 200)
| XSRFCookieNameTest |
python | huggingface__transformers | src/transformers/models/speech_to_text/processing_speech_to_text.py | {
"start": 719,
"end": 3108
} | class ____(ProcessorMixin):
r"""
Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
single processor.
[`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and
[`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more
information.
Args:
feature_extractor (`Speech2TextFeatureExtractor`):
An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`Speech2TextTokenizer`):
An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
def __call__(self, *args, **kwargs):
"""
When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's
[`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context
[`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's
[`~Speech2TextTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
audio = kwargs.pop("raw_speech")
else:
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
__all__ = ["Speech2TextProcessor"]
| Speech2TextProcessor |
python | google__jax | jax/_src/pallas/mosaic/lowering.py | {
"start": 94562,
"end": 149242
} | class ____(Exception):
pass
def _fold_and_get_constant_value(x):
def _fold(x, fuel):
if fuel <= 0:
raise FoldingError()
op_name = getattr(x.owner, "name", None)
binop_folds = {
"arith.maxsi": max,
"arith.minsi": min,
}
if op_name == "arith.constant":
if ir.IntegerType.isinstance(x.type):
return ir.IntegerAttr(x.owner.attributes["value"]).value
elif ir.FloatType.isinstance(x.type):
return ir.FloatAttr(x.owner.attributes["value"]).value
else:
raise ValueError(f"Unsupported constant type: {x.type}")
if op_name in binop_folds:
return binop_folds[op_name](_fold(v, fuel - 1) for v in x.owner.operands)
raise FoldingError()
try:
return _fold(x, 10)
except FoldingError:
return None
@register_lowering_rule(lax.stop_gradient_p)
def _stop_gradient_lowering_rule(_: LoweringRuleContext, x):
return x
@register_lowering_rule(
lax.max_p, ensure_mlir_values=False, kernel_types=[*tpu_core.KernelType]
)
def _max_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.signedinteger):
return arith.maxsi(x, y)
elif jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):
return arith.maxui(x, y)
elif jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.maximumf(x, y)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(
lax.min_p, ensure_mlir_values=False, kernel_types=[*tpu_core.KernelType]
)
def _min_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.signedinteger):
return arith.minsi(x, y)
elif jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):
return arith.minui(x, y)
elif jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.minimumf(x, y)
raise NotImplementedError(aval_out.dtype)
def _reduce_index_helper(
ctx: LoweringRuleContext, x, axes, index_dtype, reduction_kind):
(x_aval,) = ctx.avals_in
(out_aval,) = ctx.avals_out
if x_aval.dtype != jnp.float32:
raise NotImplementedError("Only float32 is supported")
if len(axes) != 1:
raise NotImplementedError("Only single axis reduction supported")
if index_dtype != jnp.int32:
raise NotImplementedError("Only index_dtype=int32 is supported")
axis = axes[0]
# TODO(b/460843515): Support 1D inputs in Mosaic.
is_1d = len(x_aval.shape) == 1
if is_1d:
x_2d_aval = jax_core.ShapedArray((1, *x_aval.shape), x_aval.dtype)
x_2d_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, x_2d_aval
)
out_aval = jax_core.ShapedArray((1, *out_aval.shape), out_aval.dtype)
x = vector.shape_cast(x_2d_type, x)
axis += 1
out_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
)
result = tpu.reduce_index(out_type, x, axis, reduction_kind)
if is_1d:
return vector.extract(result, [], [0])
return result
@register_lowering_rule(lax.argmax_p, ensure_mlir_values=False)
def _argmax_lowering_rule(ctx: LoweringRuleContext, x, axes, index_dtype):
return _reduce_index_helper(
ctx, x, axes, index_dtype,
ir.Attribute.parse("#tpu.reduction_kind<arg_max>")
)
@register_lowering_rule(lax.argmin_p, ensure_mlir_values=False)
def _argmin_lowering_rule(ctx: LoweringRuleContext, x, axes, index_dtype):
return _reduce_index_helper(
ctx, x, axes, index_dtype,
ir.Attribute.parse("#tpu.reduction_kind<arg_min>")
)
@register_lowering_rule(
lax.sub_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _sub_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.integer):
return arith.subi(x, y)
if jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.subf(x, y)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(
lax.mul_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _mul_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.integer):
return arith.muli(x, y)
if jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.mulf(x, y)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(
lax.div_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _div_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.signedinteger):
return arith.divsi(x, y)
if jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):
return arith.divui(x, y)
elif jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.divf(x, y)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(
lax.rem_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _rem_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.signedinteger):
return arith.remsi(x, y)
if jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):
return arith.remui(x, y)
if jnp.issubdtype(aval_out.dtype, jnp.floating):
return arith.remf(x, y)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(lax.abs_p)
def _abs_lowering_rule(ctx: LoweringRuleContext, x):
(aval_out,) = ctx.avals_out
if jnp.issubdtype(aval_out.dtype, jnp.integer):
return math.absi(x)
if jnp.issubdtype(aval_out.dtype, jnp.floating):
return math.absf(x)
raise NotImplementedError(aval_out.dtype)
@register_lowering_rule(lax.neg_p, ensure_mlir_values=False)
def _neg_lowering_rule(ctx: LoweringRuleContext, x):
(x_aval,) = ctx.avals_in
new_ctx = ctx.replace(
avals_in=(jax_core.ShapedArray((), x_aval.dtype), x_aval),
block_shapes=((), *ctx.block_shapes)
)
return _sub_lowering_rule(new_ctx, np.array(0, dtype=x_aval.dtype), x)
@register_lowering_rule(lax.sign_p, kernel_types=[*tpu_core.KernelType])
def _sign_lowering_rule(ctx: LoweringRuleContext, x):
return lower_fun(
pallas_utils.sign_lowering_helper, multiple_results=False,
)(ctx, x)
@register_lowering_rule(lax.nextafter_p)
def _nextafter_lowering_rule(ctx: LoweringRuleContext, x, y):
return lower_fun(
pallas_utils.nextafter_lowering_helper, multiple_results=False,
)(ctx, x, y)
@register_lowering_rule(lax.rsqrt_p)
def _rsqrt_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.rsqrt(x)
@register_lowering_rule(lax.sqrt_p)
def _sqrt_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.sqrt(x)
@register_lowering_rule(lax.square_p)
def _square_lowering_rule(ctx: LoweringRuleContext, x):
if jnp.issubdtype(ctx.avals_in[0].dtype, jnp.integer):
return arith.muli(x, x)
return arith.mulf(x, x)
@register_lowering_rule(lax.exp_p, kernel_types=[*tpu_core.KernelType])
def _exp_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.exp(x)
@register_lowering_rule(lax.pow_p, ensure_mlir_values=False)
def _pow_lowering_rule(ctx: LoweringRuleContext, x, y):
# jax accepts float base (x) and integer/float exponent (y), and integer
# exponent is casted to float.
out_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, ctx.avals_out[0]
)
if jnp.issubdtype(ctx.avals_in[1].dtype, jnp.integer):
y = arith.sitofp(out_type, y)
if not isinstance(x, ir.Value) and x == 2.:
return math.exp2(y)
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
return math.powf(x, y)
@register_lowering_rule(lax.integer_pow_p)
def _integer_pow_lowering_rule(ctx: LoweringRuleContext, x, *, y):
return lower_fun(lax_internal._integer_pow, multiple_results=False)(
ctx, x, y=y)
@register_lowering_rule(lax.exp2_p, ensure_mlir_values=False)
def _exp2_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
if ctx.forward_compatible or ctx.is_cloud_tpu_older_than(2025, 7, 26):
# exp2 in JAX lowers to exp(ln2 * x), not to pow2. We match that behavior
# here.
return lower_fun(
lambda x: jnp.exp(jnp.astype(np.log(2), x.dtype) * x),
multiple_results=False,
)(ctx, x)
return math.exp2(x)
@register_lowering_rule(lax.logistic_p)
def _logistic_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
neg_x = arith.negf(x)
exp_neg_x = math.exp(neg_x)
aval_out = ctx.avals_out[0]
out_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, aval_out
)
if not aval_out.shape:
one = ir_constant(1.0, mlir_type=out_type)
else:
one = vector.broadcast(out_type, ir_constant(1.0))
denom = arith.addf(one, exp_neg_x)
return arith.divf(one, denom)
@register_lowering_rule(lax.sin_p)
def _sin_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.sin(x)
@register_lowering_rule(lax.cos_p)
def _cos_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.cos(x)
@register_lowering_rule(lax.tan_p)
def _tan_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.tan(x)
@register_lowering_rule(lax.tanh_p)
def _tanh_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.tanh(x)
@register_lowering_rule(lax.log_p)
def _log_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.log(x)
@register_lowering_rule(lax.log1p_p)
def _log1p_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return math.log1p(x)
@register_lowering_rule(lax.round_p)
def _round_lowering_rule(ctx: LoweringRuleContext, x, *, rounding_method):
if rounding_method == 0:
return math.round(x)
elif rounding_method == 1:
return math.roundeven(x)
else:
raise NotImplementedError(f"Unsupported rounding method: {rounding_method}")
@register_lowering_rule(lax.ceil_p)
def _ceil_lowering_rule(ctx: LoweringRuleContext, x):
return math.ceil(x)
@register_lowering_rule(lax.floor_p)
def _floor_lowering_rule(ctx: LoweringRuleContext, x):
return math.floor(x)
@register_lowering_rule(lax.clz_p)
def _clz_lowering_rule(ctx: LoweringRuleContext, x):
return math.ctlz(x)
@register_lowering_rule(lax.population_count_p)
def _population_count_lowering_rule(ctx: LoweringRuleContext, x):
aval_out = ctx.avals_out[0]
if not aval_out.shape:
raise ValueError("Population count is not supported on scalars")
return math.ctpop(x)
# Mapping for signed integer comparisons.
_cmpsi_lowering_types = {
lax.eq_p: arith.CmpIPredicate.eq,
lax.ne_p: arith.CmpIPredicate.ne,
lax.lt_p: arith.CmpIPredicate.slt,
lax.le_p: arith.CmpIPredicate.sle,
lax.gt_p: arith.CmpIPredicate.sgt,
lax.ge_p: arith.CmpIPredicate.sge,
}
# Mapping for unsigned integer comparisons.
_cmpui_lowering_types = {
lax.eq_p: arith.CmpIPredicate.eq,
lax.ne_p: arith.CmpIPredicate.ne,
lax.lt_p: arith.CmpIPredicate.ult,
lax.le_p: arith.CmpIPredicate.ule,
lax.gt_p: arith.CmpIPredicate.ugt,
lax.ge_p: arith.CmpIPredicate.uge,
}
# Mapping for floating point comparisons.
_cmpf_lowering_types = {
lax.eq_p: arith.CmpFPredicate.OEQ,
lax.ne_p: arith.CmpFPredicate.ONE,
lax.lt_p: arith.CmpFPredicate.OLT,
lax.le_p: arith.CmpFPredicate.OLE,
lax.gt_p: arith.CmpFPredicate.OGT,
lax.ge_p: arith.CmpFPredicate.OGE,
}
# The relationship between comparison operations on booleans and boolean
# algebra is as follows:
# eq(x, y) = !(x ^ y)
# ne(x, y) = x ^ y
# lt(x, y) = !x && y
# le(x, y) = !x || y
# gt(x, y) = x && !y
# ge(x, y) = x || !y
def _cmp_boolean_lowering_helper(primitive, x: Array, y: Array):
"""A helper function for lowering comparison operations for boolean inputs.
Args:
primitive: A JAX primitive representing a comparison operation, which is
one of the following: `lax.eq_p` (equals), `lax.ne_p` (not equals),
`lax.lt_p` (less than), `lax.le_p` (less than or equal to),
`lax.gt_p` (greater than), or `lax.ge_p` (greater than or equal to).
x: A boolean array representing the first operand in the comparison.
y: A boolean array representing the second operand in the comparison.
Returns:
A boolean array that is the result of applying the comparison operation
between `x` and `y` based on the given primitive.
Raises:
ValueError: If an unsupported comparison primitive is provided.
"""
if primitive == lax.eq_p:
return jnp.logical_not(jnp.logical_xor(x, y))
elif primitive == lax.ne_p:
return jnp.logical_xor(x, y)
elif primitive == lax.lt_p:
return jnp.logical_and(jnp.logical_not(x), y)
elif primitive == lax.le_p:
return jnp.logical_or(jnp.logical_not(x), y)
elif primitive == lax.gt_p:
return jnp.logical_and(x, jnp.logical_not(y))
elif primitive == lax.ge_p:
return jnp.logical_or(x, jnp.logical_not(y))
else:
raise ValueError(f"Unsupported comparison primitive: {primitive}")
def _cmp_lowering_rule(primitive, ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])
x_aval, y_aval = ctx.avals_in
if x_aval.dtype != y_aval.dtype:
raise ValueError(
f"Mixed dtype operands in cmp: {x_aval.dtype}, {y_aval.dtype}"
)
dtype = x_aval.dtype
if jnp.issubdtype(dtype, jnp.bool_):
return lower_fun(
functools.partial(_cmp_boolean_lowering_helper, primitive),
multiple_results=False,
)(ctx, x, y)
if jnp.issubdtype(dtype, jnp.integer):
is_uint = jnp.issubdtype(dtype, jnp.unsignedinteger)
pred = (
_cmpui_lowering_types if is_uint else _cmpsi_lowering_types
)[primitive]
predicate = ir.IntegerAttr.get(ir.IntegerType.get_signless(64), pred)
return arith.cmpi(predicate, x, y)
if jnp.issubdtype(dtype, jnp.floating):
pred = _cmpf_lowering_types[primitive]
predicate = ir.IntegerAttr.get(ir.IntegerType.get_signless(64), pred)
return arith.cmpf(predicate, x, y)
raise NotImplementedError(f"Unsupported dtype in cmp: {dtype}")
for prim in [lax.eq_p, lax.ne_p, lax.lt_p, lax.le_p, lax.gt_p, lax.ge_p]:
register_lowering_rule(prim, kernel_types=[*tpu_core.KernelType])(
functools.partial(_cmp_lowering_rule, prim)
)
@register_lowering_rule(
lax.and_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _and_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
return arith.andi(x, y)
@register_lowering_rule(lax.is_finite_p)
def _is_finite_lowering_rule(ctx: LoweringRuleContext, x):
out_aval, = ctx.avals_out
out_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
)
return _not_lowering_rule(ctx, tpu.weird(out_type, x))
@register_lowering_rule(
lax.or_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _or_lowering_rule(ctx: LoweringRuleContext, x, y):
x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
return arith.ori(x, y)
@register_lowering_rule(lax.not_p, kernel_types=[*tpu_core.KernelType])
def _not_lowering_rule(ctx: LoweringRuleContext, x):
# The primitive not_p is lowered to
# https://github.com/openxla/stablehlo/blob/main/docs/spec.md#not
# which is arithmetic for integers and logical for booleans.
# Lowering to:
# xor x, -1
# covers both cases.
out_aval = ctx.avals_out[0]
out_scalar_type = _dtype_to_ir_type(out_aval.dtype)
if not out_aval.shape:
# Create a scalar constant.
minus_one = ir_constant(-1, out_scalar_type)
else:
# Create a vector constant.
out_type = aval_to_ir_type(
ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
)
scalar_minus_one = ir.IntegerAttr.get(out_scalar_type, -1)
minus_one = arith.constant(
out_type, ir.DenseElementsAttr.get_splat(out_type, scalar_minus_one)
)
return arith.xori(x, minus_one)
@register_lowering_rule(lax.select_n_p, kernel_types=[*tpu_core.KernelType])
def _select_n_lowering_rule(ctx: LoweringRuleContext, pred, x, *args):
if len(args) > 1:
raise NotImplementedError("select_n only supported with <= 2 arguments")
pred_aval, x_aval = ctx.avals_in[:2]
if pred_aval.dtype != np.dtype(np.bool_):
lower_ctx = LoweringRuleContext(
ctx.lowering_context,
avals_in=[pred_aval],
avals_out=[pred_aval.update(dtype=np.bool_)],
block_shapes=[None],
)
pred = lower_fun(lambda x: x != 0, multiple_results=False)(lower_ctx, pred)
if not args:
return x
# Assume x and y, which we check above.
y, = args
return arith.select(pred, y, x)
def _clamp(min, operand, max):
res = jnp.maximum(operand, min)
return jnp.minimum(res, max)
@register_lowering_rule(lax.clamp_p)
def _clamp_lowering_rule(ctx: LoweringRuleContext, min, operand, max):
"""Compute minimum_p(maximum_p(min, operand), max)."""
return lower_fun(_clamp, multiple_results=False)(ctx, min, operand, max)
def _lower_jaxpr_to_for_loop(ctx: LoweringRuleContext,
jaxpr: jax_core.Jaxpr, start: int | ir.Value,
num_steps: int | ir.Value, consts, *args,
has_loop_index: bool,
unroll: int):
def _run_body(i, args):
if has_loop_index:
lowering_context = ctx.lowering_context.replace(
block_shapes=ctx.block_shapes)
args = jaxpr_subcomp(lowering_context, jaxpr, *consts, i, *args)
else:
del i
lowering_context = ctx.lowering_context.replace(
block_shapes=(
*ctx.block_shapes[: len(consts)],
*ctx.block_shapes[len(consts) + 1 :],
),
)
args = jaxpr_subcomp(lowering_context, jaxpr, *consts, *args)
return args
if (
not isinstance(start, ir.Value)
and not isinstance(num_steps, ir.Value)
and num_steps == unroll
):
# No need for an scf.For. We can just unroll completely
for i in range(start, start + num_steps):
args = _run_body(
ir_constant(i, mlir_type=_dtype_to_ir_type(jnp.int32)), args
)
return args
if unroll != 1:
raise NotImplementedError(
f"Only unroll={num_steps=} and unroll=1 supported. Got {unroll=}.")
lbd = _ensure_mlir_value(start, pallas_core.index_map_grid_aval)
ubd = arith.addi(lbd, _ensure_mlir_value(num_steps, pallas_core.index_map_grid_aval))
step = ir_constant(1, mlir_type=_dtype_to_ir_type(jnp.int32))
for_op = scf.ForOp(lbd, ubd, step, args)
with ir.InsertionPoint(for_op.body):
iv = for_op.induction_variable
inner_args = for_op.inner_iter_args
inner_out = _run_body(iv, inner_args)
scf.yield_(inner_out)
return for_op.results
@register_lowering_rule(
    lax.scan_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _scan_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    jaxpr: jax_core.ClosedJaxpr,
    linear: tuple[bool, ...],
    length: int,
    reverse: bool,
    unroll: bool | int,
    num_consts: int,
    num_carry: int,
    _split_transpose: bool,
):
  """Lowers ``lax.scan`` by pattern-matching it to a fori-style for loop.

  Only fori_loop-like scans are supported: no extensive inputs/outputs,
  no reverse iteration, and no closed-over consts in the jaxpr.
  """
  del _split_transpose
  # Can only handle fori_loop-like scans
  num_extensive = len(args) - num_consts - num_carry
  if num_extensive: raise NotImplementedError
  if reverse: raise NotImplementedError
  del linear, num_extensive, reverse
  jaxpr, jaxpr_consts = jaxpr.jaxpr, jaxpr.consts
  if jaxpr_consts: raise NotImplementedError
  del jaxpr_consts
  jaxpr, has_loop_index = pallas_utils.pattern_match_scan_to_fori_loop(
      jaxpr, num_consts, num_carry
  )
  consts, args = split_list(args, [num_consts])
  consts_avals, args_avals = split_list(ctx.avals_in, [num_consts])
  if has_loop_index:
    # The first carry is the loop counter; peel it (and its aval) off.
    # (Removed a no-op self-assignment of loop_index_start that was here.)
    loop_index_start, *args = args
    args_avals = args_avals[1:]
  else:
    loop_index_start = 0
  consts = map(_ensure_mlir_value, consts, consts_avals)
  args = map(_ensure_mlir_value, args, args_avals)
  out = _lower_jaxpr_to_for_loop(
      ctx, jaxpr, loop_index_start, length,
      consts, *args, has_loop_index=has_loop_index,
      unroll=unroll)
  if has_loop_index:
    # The scan also returns the final loop index value.
    out = [ir_constant(length, mlir_type=_dtype_to_ir_type(jnp.int32)), *out]
  return out
def _lower_while_via_fori(
    ctx: LoweringRuleContext,
    *args,
    fori_jaxpr,
    cond_nconsts,
    cond_jaxpr,
    body_jaxpr,
    body_nconsts,
):
  """Lowers a while loop that was pattern-matched to a fori loop.

  The carry is expected to start with the (lower, upper) bound pair; the
  remaining carry elements are threaded through the for-loop body.
  """
  _, body_consts, carry = split_list(args, [cond_nconsts, body_nconsts])
  (lb, ub), args = carry[:2], carry[2:]
  for_out = _lower_jaxpr_to_for_loop(
      ctx.replace(
          # Skip the block-shape slots for the cond consts and the two bounds.
          block_shapes=(
              *ctx.block_shapes[: body_nconsts + 1],
              *ctx.block_shapes[body_nconsts + 2 :],
          ),
      ),
      fori_jaxpr,
      lb,
      arith.subi(ub, lb),
      body_consts,
      *args,
      has_loop_index=True,
      unroll=1,
  )
  # Reconstruct the while-style carry: both bounds end at ub.
  return [ub, ub, *for_out]
@register_lowering_rule(lax.while_p, kernel_types=[*tpu_core.KernelType])
def _while_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    cond_nconsts,
    cond_jaxpr,
    body_nconsts,
    body_jaxpr,
):
  """Lowers ``lax.while_loop`` to either a fori loop or an ``scf.while``."""
  # First try to lower via a simpler fori loop, which may optimize better.
  fori_jaxpr, _ = pallas_utils.pattern_match_while_to_fori_loop(
      cond_jaxpr, cond_nconsts, body_jaxpr, body_nconsts
  )
  if fori_jaxpr is not None:
    return _lower_while_via_fori(
        ctx,
        *args,
        fori_jaxpr=fori_jaxpr,
        cond_nconsts=cond_nconsts,
        cond_jaxpr=cond_jaxpr,
        body_nconsts=body_nconsts,
        body_jaxpr=body_jaxpr,
    )
  # If we fail conversion to fori, fallback to an ordinary while loop.
  cond_consts, body_consts, carry = split_list(
      args, [cond_nconsts, body_nconsts]
  )
  cond_const_block_shapes, body_const_block_shapes, carry_block_shapes = (
      split_list(ctx.block_shapes, [cond_nconsts, body_nconsts])
  )
  carry_types = [a.type for a in carry]
  while_op = scf.WhileOp(carry_types, carry)

  # "before" region: evaluate the condition on the current carry.
  before_block = while_op.before.blocks.append(*carry_types)
  with ir.InsertionPoint.at_block_begin(before_block):
    cond_args = [*cond_consts, *before_block.arguments]
    [cond] = jaxpr_subcomp(
        ctx.lowering_context.replace(
            block_shapes=[*cond_const_block_shapes, *carry_block_shapes]
        ),
        cond_jaxpr.jaxpr,
        *cond_args,
    )
    scf.condition(cond, before_block.arguments)

  # "after" region: run the body and yield the next carry.
  after_block = while_op.after.blocks.append(*carry_types)
  with ir.InsertionPoint.at_block_begin(after_block):
    body_args = [*body_consts, *after_block.arguments]
    loop_out = jaxpr_subcomp(
        ctx.lowering_context.replace(
            block_shapes=[*body_const_block_shapes, *carry_block_shapes],
        ),
        body_jaxpr.jaxpr,
        *body_args,
    )
    if loop_out:
      scf.yield_(loop_out)
  return list(while_op.results)
@register_lowering_rule(lax.cond_p, kernel_types=[*tpu_core.KernelType])
def _cond_lowering_rule(ctx: LoweringRuleContext, *args, branches, **params):
  """Lowers ``lax.cond``/``lax.switch`` to a cascade of ``scf.if`` ops.

  A statically-known branch index is resolved at lowering time; otherwise the
  multi-branch switch is lowered recursively as nested if/else.
  """
  index, *args = args
  constant_index = _fold_and_get_constant_value(index)

  if constant_index is not None:
    # Branch index is known statically: inline the selected branch directly.
    return jaxpr_subcomp(
        ctx.lowering_context.replace(block_shapes=ctx.block_shapes[1:]), branches[constant_index].jaxpr, *args
    )
  aval_to_ir_type_with_fn = functools.partial(
      aval_to_ir_type, ctx.lowering_context.dynamic_shape_replacement_fn
  )
  out_types = map(aval_to_ir_type_with_fn, ctx.avals_out)
  # pred == (index != 0): branch 0 goes to the else-block, everything else
  # to the then-block (where the remaining branches are peeled recursively).
  pred = arith.cmpi(
      arith.CmpIPredicate.ne, index, ir_constant(0, index.type)
  )
  if_op = scf.IfOp(pred, out_types, hasElse=True)
  lowering_context = ctx.lowering_context.replace(
      block_shapes=ctx.block_shapes[1:],
  )
  with ir.InsertionPoint(if_op.then_block):
    # TODO(b/300272065): Use `scf.IndexSwitchOp` instead of a cascade of
    # if/else.
    if len(branches) > 2:
      out = _cond_lowering_rule(
          ctx,
          arith.subi(index, ir_constant(1, index.type)),
          *args,
          branches=branches[1:],
      )
    else:
      out = jaxpr_subcomp(lowering_context, branches[1].jaxpr, *args)
    scf.yield_(out)
  with ir.InsertionPoint(if_op.else_block):
    out = jaxpr_subcomp(lowering_context, branches[0].jaxpr, *args)
    scf.yield_(out)
  return if_op.results
@register_lowering_rule(pjit.jit_p, kernel_types=[*tpu_core.KernelType])
def _pjit_lowering_rule(ctx: LoweringRuleContext, *args, jaxpr, **_):
  """Inlines a jitted computation by lowering its jaxpr in place."""
  inner_ctx = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)
  return jaxpr_subcomp(inner_ctx, jaxpr.jaxpr, *args)
@register_lowering_rule(pjit.reshard_p)
def _reshard_lowering_rule(ctx: LoweringRuleContext, x, dst_sharding):
  """Resharding is a no-op inside a kernel; the value is passed through."""
  return x
@register_lowering_rule(custom_derivatives.custom_jvp_call_p)
def _custom_jvp_call_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    call_jaxpr: jax_core.ClosedJaxpr,
    jvp_jaxpr_fun: lu.WrappedFun,
    num_consts: int,
    symbolic_zeros: bool,
):
  """Lowers a custom_jvp call by inlining its primal jaxpr (JVP is ignored)."""
  del jvp_jaxpr_fun
  if symbolic_zeros: raise NotImplementedError
  if num_consts: raise NotImplementedError
  if call_jaxpr.consts: raise NotImplementedError
  lowering_context = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)
  return jaxpr_subcomp(lowering_context, call_jaxpr.jaxpr, *args)
@register_lowering_rule(custom_derivatives.custom_vjp_call_p)
def _custom_vjp_call_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    call_jaxpr,
    fwd_jaxpr_thunk,
    out_trees,
    symbolic_zeros,
    bwd,
    num_consts,
):
  """Lowers a custom_vjp call by inlining its primal jaxpr (VJP is ignored)."""
  if num_consts: raise NotImplementedError
  lowering_context = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)
  return jaxpr_subcomp(lowering_context, call_jaxpr.jaxpr, *args)
@register_lowering_rule(debugging.debug_callback_p)
def _debug_callback_lowering_rule(ctx: LoweringRuleContext, *args, **kwargs):
  """Silently drops debug callbacks (not supported on Mosaic yet)."""
  del ctx, args, kwargs
  # No-op debug callbacks in Mosaic for now
  return []
@register_lowering_rule(
    primitives.program_id_p, kernel_types=[*tpu_core.KernelType]
)
def _program_id_lowering_rule(ctx: LoweringRuleContext, *, axis: int):
  """Returns the current grid index along user grid dimension ``axis``.

  Raises:
    ValueError: if no user grid is present or ``axis`` is out of range.
  """
  if ctx.lowering_context.user_grid_indices is None:
    raise ValueError(
        f"program id: {axis} was passed, but user did not provide a grid."
    )
  length = len(ctx.lowering_context.user_grid_indices)
  if axis not in range(length):
    raise ValueError(
        f"user passed in program id with axis: {axis}, but grid only has"
        f" length: {length}"
    )
  return ctx.lowering_context.user_grid_indices[axis]
@register_lowering_rule(
    primitives.num_programs_p, kernel_types=[*tpu_core.KernelType]
)
def _num_programs_lowering_rule(ctx: LoweringRuleContext, *, axis: int):
  """Returns the grid size along user-visible dimension ``axis``.

  vmapped dimensions are skipped when mapping the user's axis index onto the
  physical grid; the for/else raises if ``axis`` is out of range.
  """
  vmapped_axes = set(ctx.lowering_context.vmapped_dims)
  seen_user_axes = 0
  for i in range(ctx.lowering_context.grid_rank):
    seen_user_axes += int(i not in vmapped_axes)
    if seen_user_axes == axis + 1:
      break
  else:
    # Loop completed without break: axis maps past the physical grid rank.
    raise ValueError(
        f"user passed in program id with axis: {axis}, but grid only has"
        f" length: {ctx.lowering_context.grid_rank}"
    )
  # `i` is the physical grid dimension corresponding to the user axis.
  return tpu.iteration_bound(i)
@register_lowering_rule(tpu_primitives.repeat_p)
def _repeat_lowering_rule(ctx: LoweringRuleContext, x, *, repeats, axis):
  """Lowers the Pallas `repeat` primitive to `tpu.repeat`."""
  (out_aval,) = ctx.avals_out
  return tpu.repeat(
      aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
      ),
      x,
      axis,
      repeats,
  )
@register_lowering_rule(tpu_primitives.roll_p)
def _roll_lowering_rule(
    ctx: LoweringRuleContext, x, shift, *, axis, stride, stride_axis
):
  """Lowers the Pallas `roll` primitive to `tpu.dynamic_rotate`."""
  (out_aval,) = ctx.avals_out
  return tpu.dynamic_rotate(
      aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
      ),
      x,
      shift,
      axis,
      stride=stride,
      stride_dimension=stride_axis,
  )
@register_lowering_rule(lax.slice_p, kernel_types=[*tpu_core.KernelType])
def _slice_lowering_rule(
    ctx: LoweringRuleContext, x, limit_indices, start_indices, strides
):
  """Lowers a slice to vector dialect."""
  (aval_out,) = ctx.avals_out
  out_type = aval_to_ir_type(
      ctx.lowering_context.dynamic_shape_replacement_fn, aval_out
  )
  if strides is None:
    # lax.slice_p allows strides=None, meaning unit strides everywhere.
    strides = [1] * len(start_indices)
  sizes = np.array(limit_indices) - np.array(start_indices)
  return vector.extract_strided_slice(
      out_type, x, start_indices, sizes, strides
  )
@register_lowering_rule(
    lax.xor_p, kernel_types=[*tpu_core.KernelType], ensure_mlir_values=False
)
def _xor_lowering_rule(ctx: LoweringRuleContext, x, y):
  """Lowers bitwise xor, broadcasting the operands to a common shape."""
  x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
  return arith.xori(x, y)
@register_lowering_rule(
    lax.shift_left_p,
    kernel_types=[*tpu_core.KernelType],
    ensure_mlir_values=False,
)
def _shift_left_lowering_rule(ctx: LoweringRuleContext, x, d):
  """Lowers left shift (`x << d`), broadcasting operands first."""
  x, d = _bcast(x, d, *ctx.avals_in, *ctx.avals_out)
  return arith.shli(x, d)
@register_lowering_rule(
    lax.shift_right_arithmetic_p,
    kernel_types=[*tpu_core.KernelType],
    ensure_mlir_values=False,
)
def _shift_right_arithmetic_lowering_rule(ctx: LoweringRuleContext, x, d):
  """Lowers arithmetic (sign-extending) right shift, broadcasting first."""
  x, d = _bcast(x, d, *ctx.avals_in, *ctx.avals_out)
  return arith.shrsi(x, d)
@register_lowering_rule(
    lax.shift_right_logical_p,
    kernel_types=[*tpu_core.KernelType],
    ensure_mlir_values=False,
)
def _shift_right_logical_lowering_rule(ctx: LoweringRuleContext, x, d):
  """Lowers logical (zero-filling) right shift, broadcasting first."""
  x, d = _bcast(x, d, *ctx.avals_in, *ctx.avals_out)
  return arith.shrui(x, d)
@register_lowering_rule(lax.erf_inv_p)
def _erf_inv_lowering_rule(ctx: LoweringRuleContext, x):
  """Lowers erf_inv via the shared polynomial-approximation helper."""
  return lower_fun(
      pallas_utils.erf_inv_lowering_helper, multiple_results=False,
  )(ctx, x)
@register_lowering_rule(primitives.reciprocal_p)
def _reciprocal_lowering_rule(ctx: LoweringRuleContext, x, *, approx):
  """Lowers reciprocal (1/x) to `tpu.reciprocal`; float32 inputs only.

  Raises:
    ValueError: if the element type is not float32.
  """
  if not isinstance(x.type.element_type, ir.F32Type):
    raise ValueError("Only float32 is supported.")
  return tpu.reciprocal(x, approx=approx)
@register_lowering_rule(tpu_primitives.stochastic_round_p)
def _stochastic_round_lowering_rule(
    ctx: LoweringRuleContext, x, random_bits, *, target_dtype
):
  """Lowers stochastic rounding of a float32 vector to a narrower float type.

  Raises:
    ValueError: if the input is not float32 or ``target_dtype`` is not one of
      the supported narrow float formats.
  """
  if not isinstance(x.type.element_type, ir.F32Type):
    raise ValueError("Only float32 input is supported.")
  if target_dtype not in [
      jnp.bfloat16,
      jnp.float8_e5m2,
      jnp.float8_e4m3fn,
      jnp.float8_e4m3b11fnuz,
  ]:
    raise ValueError(
        "Only bfloat16, float8_e5m2, float8_e4m3fn, and float8_e4m3b11fnuz "
        "are supported as target dtypes."
    )
  (_, in_aval,) = ctx.avals_in
  out_type = ir.VectorType.get(
      in_aval.shape, mlir.dtype_to_ir_type(jnp.dtype(target_dtype))
  )
  return tpu.stochastic_convert(out_type, x, random_bits)
def _check_elementwise_packing_dtypes(unpacked_dtype, packed_dtype):
if unpacked_dtype == jnp.float32 and packed_dtype == jnp.bfloat16:
return
if unpacked_dtype == jnp.int32 and packed_dtype in [
jnp.int16, jnp.int8, jnp.int4
]:
return
raise ValueError(
f"Unsupported elementwise packing: {unpacked_dtype} -> {packed_dtype}. "
"Only f32 <-> bf16 and i32 <-> i16/i8/i4 are supported."
)
@register_lowering_rule(tpu_primitives.pack_elementwise_p)
def _pack_elementwise_lowering_rule(
    ctx: LoweringRuleContext, *xs, packed_dtype
):
  """Packs several same-shaped vectors elementwise into uint32 words."""
  in_aval = ctx.avals_in[0]
  _check_elementwise_packing_dtypes(in_aval.dtype, packed_dtype)
  packed_ir_type = _dtype_to_ir_type(packed_dtype)
  # The packed result is carried in uint32 lanes regardless of packed_dtype.
  out_type = ir.VectorType.get(
      in_aval.shape, _dtype_to_ir_type(jnp.uint32)
  )
  return tpu.pack_elementwise(out_type, xs, target_type=packed_ir_type)
@register_lowering_rule(tpu_primitives.unpack_elementwise_p)
def _unpack_elementwise_lowering_rule(
    ctx: LoweringRuleContext, x, index, packed_dtype, unpacked_dtype
):
  """Extracts element ``index`` of each packed uint32 word as ``unpacked_dtype``."""
  in_aval = ctx.avals_in[0]
  _check_elementwise_packing_dtypes(unpacked_dtype, packed_dtype)
  out_type = ir.VectorType.get(
      in_aval.shape, _dtype_to_ir_type(unpacked_dtype)
  )
  return tpu.unpack_elementwise(
      out_type, x, source_type=_dtype_to_ir_type(packed_dtype), index=index)
@register_lowering_rule(tpu_primitives.bitcast_p)
def _bitcast_lowering_rule(ctx: LoweringRuleContext, x, *, ty):
  """Lowers the Pallas `bitcast` primitive to `tpu.bitcast`."""
  del ty  # The output aval already carries the target type.
  (out_aval,) = ctx.avals_out
  return tpu.bitcast(
      aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
      ),
      x,
  )
@register_lowering_rule(
    lax.bitcast_convert_type_p, kernel_types=[*tpu_core.KernelType]
)
def _bitcast_convert_type_lowering_rule(
    ctx: LoweringRuleContext, x, *, new_dtype
):
  """Lowers ``lax.bitcast_convert_type``; only same-bitwidth casts supported."""
  (in_aval, ) = ctx.avals_in
  (out_aval,) = ctx.avals_out
  old_bitwidth = dtypes.itemsize_bits(in_aval.dtype)
  new_bitwidth = dtypes.itemsize_bits(new_dtype)
  if old_bitwidth != new_bitwidth:
    raise NotImplementedError("Changing bitwidths not supported.")
  return tpu.bitcast(
      aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
      ),
      x,
  )
def _alloc_value(
    aval: jax_core.AbstractValue, *, ctx: LoweringRuleContext
) -> ir.Value:
  """Allocates kernel-scoped storage for ``aval``.

  Semaphore-typed refs and AbstractSemaphore values get `tpu.sem_alloc`;
  other refs get a stack `memref.alloca` in their declared memory space.

  Raises:
    NotImplementedError: for aval kinds that cannot be allocated.
  """
  if isinstance(aval, state.AbstractRef):
    if jnp.issubdtype(aval.dtype, pallas_core.semaphore_dtype):
      assert aval.memory_space == TPUMemorySpace.SEMAPHORE
      memref_type = aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn,
          aval,
          memory_space=TPUMemorySpace.SEMAPHORE,
      )
      return tpu.sem_alloc(memref_type)
    else:
      memref_type = aval_to_ir_type(
          ctx.lowering_context.dynamic_shape_replacement_fn,
          aval,
          is_kernel_boundary=True,
          memory_space=aval.memory_space,
      )
      assert isinstance(memref_type, ir.MemRefType)
      return memref.alloca(memref_type, [], [])
  elif isinstance(aval, tpu_core.AbstractSemaphore):
    memref_type = aval_to_ir_type(
        ctx.lowering_context.dynamic_shape_replacement_fn,
        aval,
        memory_space=TPUMemorySpace.SEMAPHORE,
    )
    return tpu.sem_alloc(memref_type)
  raise NotImplementedError(f"Cannot allocate {type(aval)}.")
@register_lowering_rule(primitives.run_scoped_p)
def _run_scoped_lowering_rule(
    ctx: LoweringRuleContext,
    *consts,
    jaxpr,
    collective_axes,
    alloc_fn=_alloc_value,
):
  """Lowers `run_scoped`: allocates the scoped refs, runs the body in a
  `tpu.region`, and yields the body's results.

  Raises:
    NotImplementedError: if collective axes are requested.
  """
  if collective_axes:
    raise NotImplementedError("run_scoped lowering does not support collective axes")
  out_type = [
      aval_to_ir_type(ctx.lowering_context.dynamic_shape_replacement_fn, aval)
      for aval in ctx.avals_out
  ]
  region = tpu.RegionOp(out_type)
  in_avals = [v.aval for v in jaxpr.invars]
  with ctx.lowering_context.grid_name_context():
    jaxpr = pe.convert_constvars_jaxpr(jaxpr)
  with ir.InsertionPoint(region.body):
    args = map(lambda aval: alloc_fn(aval, ctx=ctx), in_avals)
    # Allocated refs use their full shape as the block shape.
    block_shapes = tuple(a.shape if isinstance(a, state.AbstractRef) else None
                         for a in in_avals)
    block_shapes = tuple(map(_maybe_physicalize_block_shape,
                             in_avals, block_shapes))
    # NOTE: `ctx` is rebound to a LoweringContext here (it shadows the
    # LoweringRuleContext parameter) before being passed to jaxpr_subcomp.
    ctx = ctx.lowering_context.replace(
        block_shapes=(*ctx.block_shapes, *block_shapes)
    )
    out = jaxpr_subcomp(ctx, jaxpr, *consts, *args)
    tpu.yield_(out)
  return region.results
def _device_id_to_logical(
    ctx: LoweringRuleContext, device_id,
    device_id_type: primitives.DeviceIdType):
  """Converts a user-supplied device id into (logical device id, core index).

  Non-mesh axes in the device id may name the core axis (when the grid has a
  single named dimension); any other non-mesh axis is an error.
  """
  logical_device_id, non_mesh_axes = primitives.device_id_to_logical(
      ctx.lowering_context.mesh_context,
      device_id,
      device_id_type,
      lambda name: _axis_index_rule(ctx, axis_name=name),
  )
  core_index = None
  if grid_names := ctx.lowering_context.grid_names:
    if len(grid_names) > 1:
      raise NotImplementedError(
          "Unable to determine core axis name if grid_names is more than 1."
      )
    core_axis_name = grid_names[0]
    core_index = non_mesh_axes.pop(core_axis_name, None)
  if non_mesh_axes:
    raise ValueError(
        f"Unrecognized axes in device_id: {non_mesh_axes}"
    )
  return logical_device_id, core_index
@register_lowering_rule(
    primitives.semaphore_read_p, kernel_types=[*tpu_core.KernelType]
)
def _semaphore_read_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    args_tree,
):
  """Lowers a non-blocking semaphore read to `tpu.sem_read`."""
  sem_aval, sem_transforms_avals = tree_util.tree_unflatten(args_tree, ctx.avals_in)
  primitives.check_sem_avals(
      sem_aval,
      sem_transforms_avals,
      "read",
      allowed_semaphore_types={
          tpu_core.dma_semaphore,
          pallas_core.semaphore,
          pallas_core.barrier_semaphore,
          pallas_core.SEMAPHORE_INTERPRET_DTYPE,
      },
  )
  sem, transforms = tree_util.tree_unflatten(args_tree, args)
  sem, _ = _transform_ref(sem, sem_aval.dtype, sem_aval.shape, transforms)
  return tpu.sem_read(sem)
@register_lowering_rule(
    primitives.semaphore_signal_p, kernel_types=[*tpu_core.KernelType]
)
def _semaphore_signal_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    args_tree,
    device_id_type: primitives.DeviceIdType,
):
  """Lowers a semaphore signal, optionally targeting a remote device/core."""
  sem_aval, _, _, _, _ = tree_util.tree_unflatten(args_tree, ctx.avals_in)
  sem, transforms, value, device_id, core_index = tree_util.tree_unflatten(
      args_tree, args
  )
  sem, _ = _transform_ref(sem, sem_aval.dtype, sem_aval.shape, transforms)
  if device_id is not None:
    device_id, core_id = _device_id_to_logical(ctx, device_id, device_id_type)
    if core_id is not None:
      # The core may be named either via `core_index` or via the core axis in
      # `device_id`, but not both.
      if core_index is not None:
        raise ValueError(
            "Cannot specify both `core_index` and the core axis in `device_id`."
        )
      core_index = core_id
  tpu.sem_signal(sem, value, device_id=device_id, core_id=core_index)
  return []
@register_lowering_rule(
    primitives.semaphore_wait_p, kernel_types=[*tpu_core.KernelType]
)
def _semaphore_wait_lowering_rule(ctx: LoweringRuleContext, *args, args_tree):
  """Lowers a (decrementing) semaphore wait to `tpu.sem_wait`."""
  sem_aval, _, _, _ = tree_util.tree_unflatten(args_tree, ctx.avals_in)
  sem, transforms, value, decrement = tree_util.tree_unflatten(args_tree, args)
  if not decrement:
    raise NotImplementedError("Non-decrementing wait is not supported.")
  sem, _ = _transform_ref(sem, sem_aval.dtype, sem_aval.shape, transforms)
  tpu.sem_wait(sem, value)
  return []
@register_lowering_rule(tpu_primitives.dma_start_p)
def _dma_start_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    tree,
    device_id_type: primitives.DeviceIdType,
    priority: int,
    add: bool,
):
  """Lowers an async copy start to `tpu.enqueue_dma`.

  Applies any ref transforms to source/destination/semaphores, and resolves
  the optional remote device id into (logical device id, core id).

  Raises:
    NotImplementedError: for accumulating DMAs or bool-dtype transfers.
  """
  if add:
    raise NotImplementedError("DMA with add=True is not supported.")
  (
      src_ref,
      src_transforms,
      dst_ref,
      dst_transforms,
      sem,
      sem_transforms,
      src_sem,
      src_sem_transforms,
      device_id,
  ) = tree_util.tree_unflatten(tree, args)
  (src_ref_aval, _, dst_ref_aval, _, sem_aval, _, src_sem_aval, _, _) = (
      tree_util.tree_unflatten(tree, ctx.avals_in)
  )
  if src_ref_aval.dtype == jnp.bool_:
    raise NotImplementedError("DMAs with bool dtypes are not supported.")
  block_shapes = tree_util.tree_unflatten(tree, ctx.block_shapes)
  src_ref_block_shape, dst_ref_block_shape = block_shapes[0], block_shapes[2]
  src_ref, _ = _transform_ref(
      src_ref, src_ref_aval.dtype, src_ref_block_shape, src_transforms
  )
  if src_sem is not None:
    src_sem, _ = _transform_ref(
        src_sem, src_sem_aval.dtype, src_sem_aval.shape, src_sem_transforms
    )
  dst_ref, _ = _transform_ref(
      dst_ref, dst_ref_aval.dtype, dst_ref_block_shape, dst_transforms
  )
  sem, _ = _transform_ref(sem, sem_aval.dtype, sem_aval.shape, sem_transforms)
  core_id = None
  if device_id is not None:
    device_id, core_id = _device_id_to_logical(ctx, device_id, device_id_type)
  tpu.enqueue_dma(
      src_ref,
      dst_ref,
      sem,
      source_semaphore=src_sem,
      device_id=device_id,
      core_id=core_id,
      priority=priority,
  )
  return []
@register_lowering_rule(tpu_primitives.dma_wait_p)
def _dma_wait_lowering_rule(ctx: LoweringRuleContext, *args, tree,
                            device_id_type: primitives.DeviceIdType):
  """Lowers an async copy wait to `tpu.wait_dma2`.

  The `device_id` operand is only forwarded on runtimes new enough to accept
  it (see the forward-compatibility check below).
  """
  (
      src,
      src_transforms,
      dst,
      transforms,
      sem,
      sem_transforms,
      _,
      _,
      device_id,
  ) = tree_util.tree_unflatten(tree, args)
  (src_aval, _, dst_aval, _, sem_aval, _, _, _, _) = tree_util.tree_unflatten(
      tree, ctx.avals_in
  )
  block_shapes = tree_util.tree_unflatten(tree, ctx.block_shapes)
  ref_block_shape = block_shapes[2]
  src, _ = _transform_ref(src, src_aval.dtype, src_aval.shape, src_transforms)
  dst, _ = _transform_ref(dst, dst_aval.dtype, ref_block_shape, transforms)
  sem, _ = _transform_ref(sem, sem_aval.dtype, sem_aval.shape, sem_transforms)
  core_id = None
  if device_id is not None:
    device_id, core_id = _device_id_to_logical(ctx, device_id, device_id_type)
  # Older runtimes do not accept the device_id operand on wait_dma2.
  if ctx.forward_compatible or ctx.is_cloud_tpu_older_than(2025, 7, 27):
    tpu.wait_dma2(sem, src, dst, core_id=core_id)
  else:
    tpu.wait_dma2(sem, src, dst, device_id=device_id, core_id=core_id)
  return []
@register_lowering_rule(lax.axis_index_p, kernel_types=[*tpu_core.KernelType])
def _axis_index_rule(ctx: LoweringRuleContext, *, axis_name: Hashable):
  """Returns the index of this program/device along a named axis.

  Grid axes resolve to the corresponding program id; mesh axes are derived
  from the flat device id by row-major arithmetic over the mesh shape.
  """
  grid_names = ctx.lowering_context.grid_names
  if grid_names and axis_name in grid_names:
    # We are querying a named axis corresponding to a grid dimension.
    return _program_id_lowering_rule(ctx, axis=grid_names.index(axis_name))
  # We are querying a named axis corresponding to a mesh dimension.
  device_id = tpu.device_id()
  mesh_context = ctx.lowering_context.mesh_context
  if mesh_context is None:
    raise ValueError("Mesh context is not set.")
  mesh_shape = mesh_context.mesh_shape
  axis_names = mesh_context.axis_names
  axis_index = axis_names.index(axis_name)
  axis_size = ir_constant(mesh_shape[axis_index])
  # (device_id // prod(minor dims)) % axis_size gives the coordinate along
  # this axis under a row-major device ordering.
  minor_divisor = ir_constant(
      np.prod(mesh_shape[axis_index + 1 :], dtype=np.int32)
  )
  return arith.remsi(arith.divsi(device_id, minor_divisor), axis_size)
@register_lowering_rule(
    tpu_primitives.get_barrier_semaphore_p, kernel_types=[*tpu_core.KernelType]
)
def _get_barrier_semaphore_rule(ctx: LoweringRuleContext):
  """Lowers `get_barrier_semaphore` to `tpu.sem_barrier`."""
  memref_type = aval_to_ir_type(
      ctx.lowering_context.dynamic_shape_replacement_fn, ctx.avals_out[0]
  )
  return tpu.sem_barrier(memref_type)
@register_lowering_rule(primitives.delay_p)
def _delay_rule(ctx: LoweringRuleContext, nanos: int):
  """Lowers a busy-wait of ``nanos`` nanoseconds to `tpu.delay`."""
  tpu.delay(nanos)
  return []
@register_lowering_rule(debugging.debug_print_p)
def _debug_print_rule(
    ctx: LoweringRuleContext,
    *dyn_args,
    fmt: str,
    ordered,
    partitioned,
    in_tree,
    static_args,
    np_printoptions,
    has_placeholders,
    logging_record,
):
  """Lowers `debug_print` to `tpu.log` (scalars) or `tpu.log_buffer` (vector).

  Supported inputs are either all scalars, or exactly one vector. Scalar
  placeholders must be 32-bit integers; a vector format string must end with
  a trailing `{}` placeholder.
  """
  del partitioned, np_printoptions
  if ordered:
    raise NotImplementedError("Ordered debug_print is not supported on Pallas.")
  args, kwargs = debugging.merge_callback_args(in_tree, dyn_args, static_args)
  if kwargs:
    raise ValueError(
        "Only positional arguments are supported by debug_print on Pallas."
    )
  is_scalar_inputs = [not aval.shape for aval in ctx.avals_in]
  is_all_scalars = all(is_scalar_inputs)
  is_single_vector = len(is_scalar_inputs) == 1 and not is_scalar_inputs[0]
  if not (is_all_scalars or is_single_vector):
    raise ValueError(
        "All inputs to debug_print must be all scalars or a single vector, but"
        f" got {ctx.avals_in}"
    )

  # Scalar case.
  if is_all_scalars:
    if has_placeholders:
      primitives.check_debug_print_format(fmt, *args)
      if not all(
          isinstance(arg.type, ir.IntegerType) and arg.type.width == 32
          for arg in args
      ):
        raise TypeError(
            "All arguments must be 32-bit integers when using"
            " placeholders (`{...}`). If you need to print values of other types,"
            " remove placeholders from the format string."
        )

      # TPU expects $0, $1 etc as placeholders.
      fmt = "".join(
          f"{text}${{spec}}{idx}" if field is not None else text
          for idx, (text, field, spec, _) in enumerate(
              string.Formatter().parse(fmt)
          )
      )

    tpu.log(args, fmt, formatted=has_placeholders)
    return ()

  # Vector case.
  # Copy the array to vmem for logging.
  # Note that the shape of the array must be explicitly provided here. This is
  # because the underlying implementation aligns shapes to tile boundaries,
  # potentially altering the original shape and making it unrecoverable.
  if len(ctx.avals_in) != 1:
    raise ValueError(
        "Only one vector input to debug_print is supported."
    )
  (aval,) = ctx.avals_in
  (arg,) = args

  if not has_placeholders or not fmt.endswith("{}"):
    raise ValueError("For vector input, the format string must end with {}.")
  fmt = fmt[:-2]

  region = tpu.RegionOp(())
  with ir.InsertionPoint(region.body):
    element_type = _dtype_to_ir_type(aval.dtype)
    ref_type = ir.MemRefType.get(
        aval.shape,
        element_type,
        memory_space=ir.Attribute.parse("#tpu.memory_space<vmem>"),
    )
    ref = memref.alloca(ref_type, [], [])

    index_type = ir.IndexType.get()
    zero = arith.constant(index_type, 0)
    indices = [zero] * len(aval.shape)
    vector.store(arg, ref, indices)
    tpu.log_buffer(ref, aval.shape, fmt)
    tpu.yield_([])
  return ()
@register_lowering_rule(tpu_primitives.prng_seed_p)
def _prng_seed_lowering_rule(ctx: LoweringRuleContext, *seeds):
  """Seeds the on-chip PRNG from either a key bundle or scalar integers."""
  del ctx
  # In the KeyScalarBundle case we unpack the bundle and set the seed with
  # the list of scalars.
  if len(seeds) == 1 and isinstance(seeds[0], KeyScalarBundle):
    tpu.prng_set_seed_32(seeds[0].scalars)
    return []
  # For integer seeds, we can set the seed directly as PRNGSeed32Op natively
  # takes in a list of integers as input.
  all_integers = all(isinstance(seed.type, ir.IntegerType) for seed in seeds)
  if not all_integers:
    seed_types = [seed.type for seed in seeds]
    raise ValueError(f"All seed data must be scalar integers. Got {seed_types}")
  tpu.prng_set_seed_32(seeds)
  return []
@register_lowering_rule(tpu_primitives.prng_random_bits_p)
def _prng_random_bits_lowering_rule(ctx: LoweringRuleContext, *, shape):
  """Generates random bits of the given shape via `tpu.prng_random_bits`."""
  if len(shape) <= 1:
    # TODO(b/342054464): Support implicit dims for PRNGRandomBitsOp.
    raise NotImplementedError("random_bits only supports rank>=2 outputs.")
  out_aval = ctx.avals_out[0]
  out_type = aval_to_ir_type(
      ctx.lowering_context.dynamic_shape_replacement_fn, out_aval
  )
  return tpu.prng_random_bits(out_type)
@register_lowering_rule(prng.random_seed_p)
def random_seed_lowering(ctx: LoweringRuleContext, seeds, *, impl):
  """Lowers `random_seed` by tracing the PRNG impl's seed function."""
  seed_lowering = lower_fun(impl.seed, multiple_results=False)
  return seed_lowering(ctx, seeds)
@register_lowering_rule(prng.random_bits_p)
def random_bits_lowering(ctx: LoweringRuleContext, keys, *, bit_width, shape):
  """Lowers `random_bits` via the key's PRNG implementation (32-bit only)."""
  assert bit_width == 32, "Only 32-bit PRNG supported."
  aval, = ctx.avals_in
  assert isinstance(aval.dtype, prng.KeyTy)
  impl = aval.dtype._impl
  _proxy_fn = impl.random_bits
  if not pl_random.is_pallas_impl(impl):
    # Non-Pallas impls expect raw uint32 key data rather than a key array.
    def new_lowering(key, bit_width, shape):
      key = jax.random.key_data(key).astype(jnp.uint32)
      return impl.random_bits(key, bit_width, shape)
    _proxy_fn = new_lowering
  bits_lowering = lower_fun(_proxy_fn, multiple_results=False)
  return bits_lowering(ctx, keys, bit_width=bit_width, shape=shape)
@register_lowering_rule(prng.random_fold_in_p)
def random_fold_in_lowering(ctx: LoweringRuleContext, keys, msgs):
  """Lowers `random_fold_in` via the key's PRNG implementation."""
  keys_aval, msgs_aval = ctx.avals_in
  assert isinstance(keys_aval.dtype, prng.KeyTy)
  impl = keys_aval.dtype._impl
  fold_in_lowering = lower_fun(impl.fold_in, multiple_results=False)
  if pl_random.is_pallas_impl(impl):
    return fold_in_lowering(ctx, keys, msgs)
  else:
    # Non-Pallas impls operate on the physical (uint32) key representation.
    ctx = dataclasses.replace(ctx,
                              avals_in=[jax_core.physical_aval(keys_aval), msgs_aval],
                              avals_out=map(jax_core.physical_aval, ctx.avals_out))
    return fold_in_lowering(ctx, keys, msgs)
@register_lowering_rule(prng.random_unwrap_p)
def random_unwrap_lowering(ctx: LoweringRuleContext, key):
  """Lowers `key_data`; only supported for non-Pallas PRNG keys.

  Raises:
    ValueError: for Pallas PRNG keys, which must use `split_pallas_seed`.
  """
  keys_aval = ctx.avals_in[0]
  assert isinstance(keys_aval.dtype, prng.KeyTy)
  impl = keys_aval.dtype._impl
  if not pl_random.is_pallas_impl(impl):
    return key
  raise ValueError(
      # Fixed grammar in the user-facing message ("not support" -> "not
      # supported").
      "key_data not supported for Pallas PRNG keys. Use"
      " split_pallas_seed instead."
  )
@register_lowering_rule(prng.random_wrap_p)
def random_wrap_lowering(ctx: LoweringRuleContext, key_data, *, impl):
  """Lowers `wrap_key_data`; only supported for non-Pallas PRNG impls.

  Raises:
    ValueError: for Pallas PRNG impls, which must use `wrap_pallas_seed`.
  """
  del ctx
  if not pl_random.is_pallas_impl(impl):
    return key_data
  raise ValueError(
      # Fixed grammar in the user-facing message ("not support" -> "not
      # supported").
      "wrap_key_data not supported for Pallas PRNG keys. Use"
      " wrap_pallas_seed instead."
  )
@register_lowering_rule(tpu_primitives.split_key_p)
def _split_key_lowering_rule(
    ctx: LoweringRuleContext, key_data: KeyScalarBundle
):
  """Unbundles a Pallas PRNG key into its raw seed scalars."""
  return key_data.scalars
@register_lowering_rule(tpu_primitives.join_key_p)
def _join_key_lowering_rule(ctx: LoweringRuleContext, *scalars, impl):
  """Bundles seed scalars back into a Pallas PRNG key.

  Raises:
    ValueError: if ``impl`` is not a Pallas PRNG implementation.
  """
  if not pl_random.is_pallas_impl(impl):
    # Bug fix: the error was previously `return`ed instead of raised, so the
    # ValueError was silently handed to the caller as the lowering result.
    raise ValueError(f"Can only join Pallas keys. Got impl={impl}")
  return KeyScalarBundle(scalars=scalars, key_shape=tuple(impl.key_shape))
@register_lowering_rule(checkify.check_p, kernel_types=[*tpu_core.KernelType])
def _check_lowering_rule(
    ctx: LoweringRuleContext, *err_args, err_tree, debug
):
  """Lowers a checkify debug check to a `cf.assert`.

  Raises:
    NotImplementedError: for non-debug (functionalized) checks.
  """
  del ctx  # Unused.

  if not debug:
    raise NotImplementedError(
        "Non-debug checks are not supported by the Mosaic backend."
        " Functionalize them via `jax.experimental.checkify`."
    )
  if not pallas_helpers.debug_checks_enabled():
    return []

  error = jax.tree.unflatten(err_tree, err_args)
  [pred] = error._pred.values()
  [exception_tree] = error._metadata.values()
  [payload] = error._payload.values()
  exception = jax.tree.unflatten(exception_tree, payload)
  # (Removed a duplicated copy of this assertion.)
  assert isinstance(exception, checkify.FailedCheckError)

  # check_p has an inverted predicate compared to assert, so we need to compute
  # ``not pred`` here.
  minus_one = ir_constant(-1, _dtype_to_ir_type(jnp.bool))
  not_pred = arith.xori(pred, minus_one)
  cf.assert_(not_pred, exception.fmt_string)
  return []
@register_lowering_rule(prng.threefry2x32_p)
def _threefry2x32_lowering(ctx: LoweringRuleContext, k1, k2, m1, m2):
  """Lowers threefry2x32 by tracing the unrolled reference implementation."""
  def _lower_fun(k1, k2, m1, m2):
    with jax.named_scope("threefry2x32"):
      res = prng._threefry2x32_lowering(k1, k2, m1, m2, use_rolled_loops=False)
    return res

  threefry_lowering = lower_fun(_lower_fun, multiple_results=True)
  return threefry_lowering(ctx, k1, k2, m1, m2)
@register_lowering_rule(prng.iota_2x32_shape_p)
def _iota_2x32_shape_lowering(ctx: LoweringRuleContext, *, shape):
  """Lowers iota_2x32_shape as (zeros, flat row-major iota).

  The high 32 bits are always zero, so the total element count must fit in
  an int32.
  """
  total_elements = np.prod(shape)
  if total_elements > np.iinfo(jnp.int32).max:
    raise NotImplementedError(f"Iota with >{np.iinfo(jnp.int32).max} items.")

  def _lower_fun(shape):
    # Flat-index computation: sum over dims of (per-dim iota * stride).
    iota_data = jnp.zeros(shape, dtype=jnp.int32)
    multiplier = 1
    for dim in range(len(shape)-1, -1, -1):
      counts_lo = lax.broadcasted_iota(
          dtype=jnp.int32, shape=shape, dimension=dim
      )
      iota_data += counts_lo * multiplier
      multiplier *= shape[dim]
    counts_hi = jnp.zeros(shape, dtype=jnp.int32)
    return counts_hi, iota_data

  iota_lowering = lower_fun(_lower_fun, multiple_results=True)
  return iota_lowering(ctx, shape=shape)
@register_lowering_rule(lax.pad_p)
def _pad_lowering_rule(ctx: LoweringRuleContext, *args, **kwargs):
  """Lowers `lax.pad` by concatenating pad-value blocks along each axis.

  Interior padding is not supported.
  """
  operand, padding_value = args
  padding_config = kwargs["padding_config"]
  out_type = aval_to_ir_type(
      ctx.lowering_context.dynamic_shape_replacement_fn, ctx.avals_in[0]
  )
  if not isinstance(out_type, ir.VectorType):
    raise NotImplementedError("Only vector types are supported.")

  for axis, (low, high, interior) in enumerate(padding_config):
    if low == 0 and high == 0 and interior == 0:
      continue

    def _pad(val, axis=axis):
      # Build a vector of `val` rows/columns filled with the padding value
      # along `axis` (axis is bound at definition time to avoid the
      # late-binding-closure pitfall).
      shape = list(operand.type.shape)
      shape[axis] = val
      pad_vec_type = ir.VectorType.get(
          shape,
          operand.type.element_type,
      )

      if isinstance(padding_value, ir.OpResult):
        pad = vector.broadcast(pad_vec_type, padding_value)
      else:
        scalar_attr = ir.FloatAttr.get(operand.type.element_type, padding_value)
        pad = arith.constant(
            pad_vec_type,
            ir.DenseElementsAttr.get_splat(pad_vec_type, scalar_attr),
        )
      return pad

    if low != 0:
      operand = tpu.concatenate([_pad(low), operand], dimension=axis)
    if high != 0:
      operand = tpu.concatenate([operand, _pad(high)], dimension=axis)
    if interior > 0:
      raise NotImplementedError("Not implemented: interior padding")

  return operand
@register_lowering_rule(control_flow.platform_index_p)
def _platform_index_lowering(
    ctx: mlir.LoweringRuleContext,
    *,
    platforms: BranchesPlatforms,
):
  """Selects the branch index matching the "mosaic" platform (or the default).

  A ``None`` platform list denotes the default branch.
  """
  for i, ps in enumerate(platforms):
    # note - slightly odd structure here, as platforms is a seq[seq[str]]
    # Bug fix: check `ps is None` first — the original evaluated
    # `"mosaic" in ps` before the None check, raising TypeError for the
    # default (None) branch.
    if ps is None or "mosaic" in ps:
      return ir_constant(i)
  raise NotImplementedError(
      "No mosaic or default platform indexing rule found."
  )
@register_lowering_rule(shape_poly.dim_as_value_p)
def _dim_as_value_lowering(ctx: LoweringRuleContext, *, dim):
  """Materializes a symbolic dimension as an int32 constant placeholder."""
  placeholder = ctx.lowering_context.dynamic_shape_replacement_fn((dim,))[0]
  return ir_constant(placeholder, mlir_type=_dtype_to_ir_type(jnp.int32))
@register_lowering_rule(tpu_primitives.touch_p)
def _touch_lowering_rule(ctx: LoweringRuleContext, x: jax.Array):
  """No-op lowering: `touch` only keeps ``x`` alive and produces no outputs."""
  del ctx, x  # Intentionally unused.
  return []
| FoldingError |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_colorado_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_colorado_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_colorado_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidColoradoZip |
python | catalyst-team__catalyst | catalyst/contrib/losses/recsys.py | {
"start": 3604,
"end": 4726
} | class ____(PairwiseLoss):
"""Logistic loss function.
Example:
.. code-block:: python
import torch
from catalyst.contrib.losses import recsys
pos_score = torch.randn(3, requires_grad=True)
neg_score = torch.randn(3, requires_grad=True)
output = recsys.LogisticLoss()(pos_score, neg_score)
output.backward()
"""
def __init__(self) -> None:
super().__init__()
def forward(
self, positive_score: torch.Tensor, negative_score: torch.Tensor
) -> torch.Tensor:
"""Forward propagation method for the logistic loss.
Args:
positive_score: Tensor containing predictions for known positive items.
negative_score: Tensor containing predictions for sampled negative items.
Returns:
computed loss
"""
self._assert_equal_size(positive_score, negative_score)
positives_loss = 1.0 - torch.sigmoid(positive_score)
negatives_loss = torch.sigmoid(negative_score)
loss = positives_loss + negatives_loss
return loss.mean()
| LogisticLoss |
python | ray-project__ray | doc/source/serve/doc_code/autoscale_model_comp_example.py | {
"start": 311,
"end": 495
} | class ____:
async def __call__(self) -> str:
start = time.time()
while time.time() - start < 0.2:
pass
return "heavy"
@serve.deployment
| HeavyLoad |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 26642,
"end": 27331
} | class ____(IntegrationBase, unittest.TestCase):
package = 'tests.pkgs.rendererscanapp'
def test_root(self):
res = self.testapp.get('/one', status=200)
self.assertTrue(b'One!' in res.body)
def test_two(self):
res = self.testapp.get('/two', status=200)
self.assertTrue(b'Two!' in res.body)
def test_rescan(self):
self.config.scan('tests.pkgs.rendererscanapp')
app = self.config.make_wsgi_app()
testapp = TestApp(app)
res = testapp.get('/one', status=200)
self.assertTrue(b'One!' in res.body)
res = testapp.get('/two', status=200)
self.assertTrue(b'Two!' in res.body)
| RendererScanAppTest |
python | cherrypy__cherrypy | cherrypy/process/plugins.py | {
"start": 15721,
"end": 16713
} | class ____(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
"""Initialize the PID file plugin."""
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
"""Write a PID file to disk."""
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
with open(self.pidfile, 'wb') as f:
f.write(ntob('%s\n' % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
"""Delete the PID file from disk."""
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
| PIDFile |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 53094,
"end": 57352
} | class ____(Metric, metaclass=abc.ABCMeta):
"""Abstract base class for computing sensitivity and specificity.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
"""
def __init__(self,
value,
num_thresholds=200,
class_id=None,
name=None,
dtype=None):
super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
if num_thresholds <= 0:
raise ValueError('`num_thresholds` must be > 0.')
self.value = value
self.class_id = class_id
self.true_positives = self.add_weight(
'true_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
# Compute `num_thresholds` thresholds in [0, 1]
if num_thresholds == 1:
self.thresholds = [0.5]
self._thresholds_distributed_evenly = False
else:
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
self.thresholds = [0.0] + thresholds + [1.0]
self._thresholds_distributed_evenly = True
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
class_id=self.class_id,
sample_weight=sample_weight)
def reset_state(self):
num_thresholds = len(self.thresholds)
confusion_matrix_variables = (self.true_positives, self.true_negatives,
self.false_positives, self.false_negatives)
backend.batch_set_value([
(v, np.zeros((num_thresholds,))) for v in confusion_matrix_variables
])
def get_config(self):
config = {'class_id': self.class_id}
base_config = super(SensitivitySpecificityBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _find_max_under_constraint(self, constrained, dependent, predicate):
"""Returns the maximum of dependent_statistic that satisfies the constraint.
Args:
constrained: Over these values the constraint
is specified. A rank-1 tensor.
dependent: From these values the maximum that satiesfies the
constraint is selected. Values in this tensor and in
`constrained` are linked by having the same threshold at each
position, hence this tensor must have the same shape.
predicate: A binary boolean functor to be applied to arguments
`constrained` and `self.value`, e.g. `tf.greater`.
Returns maximal dependent value, if no value satiesfies the constraint 0.0.
"""
feasible = array_ops.where_v2(predicate(constrained, self.value))
feasible_exists = math_ops.greater(array_ops.size(feasible), 0)
max_dependent = math_ops.reduce_max(array_ops.gather(dependent, feasible))
return array_ops.where_v2(feasible_exists, max_dependent, 0.0)
| SensitivitySpecificityBase |
python | huggingface__transformers | src/transformers/generation/utils.py | {
"start": 14042,
"end": 18738
} | class ____(ModelOutput):
"""
Outputs of encoder-decoder generation models, when using beam methods.
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting
of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True`):
Returns the model cache, used to speed up decoding. Different models have a different cache format, check
the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
"""
sequences: torch.LongTensor
sequences_scores: torch.FloatTensor | None = None
scores: tuple[torch.FloatTensor] | None = None
logits: tuple[torch.FloatTensor] | None = None
beam_indices: torch.LongTensor | None = None
encoder_attentions: tuple[torch.FloatTensor] | None = None
encoder_hidden_states: tuple[torch.FloatTensor] | None = None
decoder_attentions: tuple[tuple[torch.FloatTensor]] | None = None
cross_attentions: tuple[tuple[torch.FloatTensor]] | None = None
decoder_hidden_states: tuple[tuple[torch.FloatTensor]] | None = None
past_key_values: Cache | None = None
# Typing shortcuts
GenerateNonBeamOutput = Union[GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput]
GenerateBeamOutput = Union[GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput]
GenerateOutput = Union[GenerateNonBeamOutput, GenerateBeamOutput]
| GenerateBeamEncoderDecoderOutput |
python | ansible__ansible | test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py | {
"start": 121,
"end": 239
} | class ____(object):
def filters(self):
return {
'testfilter2': testfilter2
}
| FilterModule |
python | keras-team__keras | keras/src/backend/torch/trainer.py | {
"start": 17859,
"end": 17985
} | class ____(EpochIterator):
def _get_iterator(self):
return self.data_adapter.get_torch_dataloader()
| TorchEpochIterator |
python | davidhalter__jedi | jedi/inference/analysis.py | {
"start": 1006,
"end": 2271
} | class ____:
def __init__(self, name, module_path, start_pos, message=None):
self.path = module_path
self._start_pos = start_pos
self.name = name
if message is None:
message = CODES[self.name][2]
self.message = message
@property
def line(self):
return self._start_pos[0]
@property
def column(self):
return self._start_pos[1]
@property
def code(self):
# The class name start
first = self.__class__.__name__[0]
return first + str(CODES[self.name][0])
def __str__(self):
return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
self.code, self.message)
def __eq__(self, other):
return (self.path == other.path and self.name == other.name
and self._start_pos == other._start_pos)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.path, self._start_pos, self.name))
def __repr__(self):
return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
self.name, self.path,
self._start_pos[0], self._start_pos[1])
| Error |
python | getsentry__sentry | tests/sentry/integrations/bitbucket_server/test_integration.py | {
"start": 813,
"end": 18358
} | class ____(IntegrationTestCase):
provider = BitbucketServerIntegrationProvider
@cached_property
@assume_test_silo_mode(SiloMode.CONTROL)
def integration(self):
integration = Integration.objects.create(
provider=self.provider.key,
name="Bitbucket Server",
external_id="bitbucket_server:1",
metadata={
"base_url": "https://bitbucket.example.com",
"domain_name": "bitbucket.example.com",
},
)
integration.add_organization(self.organization, self.user)
return integration
def test_config_view(self) -> None:
resp = self.client.get(self.init_path)
assert resp.status_code == 200
resp = self.client.get(self.setup_path)
assert resp.status_code == 200
self.assertContains(resp, "Connect Sentry")
self.assertContains(resp, "Submit</button>")
@responses.activate
def test_validate_url(self) -> None:
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 200
self.assertContains(resp, "Enter a valid URL")
@responses.activate
def test_validate_private_key(self) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=503,
)
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": "hot-garbage",
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 200
self.assertContains(
resp, "Private key must be a valid SSH private key encoded in a PEM format."
)
@responses.activate
def test_validate_consumer_key_length(self) -> None:
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "x" * 201,
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 200
self.assertContains(resp, "Consumer key is limited to 200")
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_authentication_request_token_timeout(self, mock_record: MagicMock) -> None:
timeout = ReadTimeout("Read timed out. (read timeout=30)")
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
body=timeout,
)
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 200
self.assertContains(resp, "Setup Error")
self.assertContains(resp, "request token from Bitbucket")
self.assertContains(resp, "Timed out")
assert_failure_metric(
mock_record, "Timed out attempting to reach host: bitbucket.example.com"
)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_authentication_request_token_fails(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=503,
)
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 200
self.assertContains(resp, "Setup Error")
self.assertContains(resp, "request token from Bitbucket")
assert_failure_metric(mock_record, "")
@responses.activate
def test_authentication_request_token_redirect(self) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=200,
content_type="text/plain",
body="oauth_token=abc123&oauth_token_secret=def456",
)
# Start pipeline
self.client.get(self.init_path)
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 302
redirect = (
"https://bitbucket.example.com/plugins/servlet/oauth/authorize?oauth_token=abc123"
)
assert redirect == resp["Location"]
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_authentication_access_token_failure(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=200,
content_type="text/plain",
body="oauth_token=abc123&oauth_token_secret=def456",
)
error_msg = "<html>it broke</html>"
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/access-token",
status=500,
content_type="text/plain",
body=error_msg,
)
# Get config page
resp = self.client.get(self.init_path)
assert resp.status_code == 200
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 302
assert resp["Location"]
resp = self.client.get(self.setup_path + "?oauth_token=xyz789")
assert resp.status_code == 200
self.assertContains(resp, "Setup Error")
self.assertContains(resp, "access token from Bitbucket")
assert_failure_metric(mock_record, error_msg)
def install_integration(self):
# Get config page
resp = self.client.get(self.setup_path)
assert resp.status_code == 200
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "sentry-bot",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 302
assert resp["Location"]
resp = self.client.get(self.setup_path + "?oauth_token=xyz789")
assert resp.status_code == 200
return resp
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_authentication_verifier_expired(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=200,
content_type="text/plain",
body="oauth_token=abc123&oauth_token_secret=def456",
)
error_msg = "oauth_error=token+expired"
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/access-token",
status=404,
content_type="text/plain",
body=error_msg,
)
# Try getting the token but it has expired for some reason,
# perhaps a stale reload/history navigate.
resp = self.install_integration()
self.assertContains(resp, "Setup Error")
self.assertContains(resp, "access token from Bitbucket")
assert_failure_metric(mock_record, error_msg)
@responses.activate
def test_authentication_success(self) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=200,
content_type="text/plain",
body="oauth_token=abc123&oauth_token_secret=def456",
)
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/access-token",
status=200,
content_type="text/plain",
body="oauth_token=valid-token&oauth_token_secret=valid-secret",
)
responses.add(
responses.POST,
"https://bitbucket.example.com/rest/webhooks/1.0/webhook",
status=204,
body="",
)
self.install_integration()
integration = Integration.objects.get()
assert integration.name == "sentry-bot"
assert integration.metadata["domain_name"] == "bitbucket.example.com"
assert integration.metadata["base_url"] == "https://bitbucket.example.com"
assert integration.metadata["verify_ssl"] is False
org_integration = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
assert org_integration.config == {}
idp = IdentityProvider.objects.get(type="bitbucket_server")
identity = Identity.objects.get(
idp=idp, user=self.user, external_id="bitbucket.example.com:sentry-bot"
)
assert identity.data["consumer_key"] == "sentry-bot"
assert identity.data["access_token"] == "valid-token"
assert identity.data["access_token_secret"] == "valid-secret"
assert identity.data["private_key"] == EXAMPLE_PRIVATE_KEY
@responses.activate
def test_setup_external_id_length(self) -> None:
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/request-token",
status=200,
content_type="text/plain",
body="oauth_token=abc123&oauth_token_secret=def456",
)
responses.add(
responses.POST,
"https://bitbucket.example.com/plugins/servlet/oauth/access-token",
status=200,
content_type="text/plain",
body="oauth_token=valid-token&oauth_token_secret=valid-secret",
)
responses.add(
responses.POST,
"https://bitbucket.example.com/rest/webhooks/1.0/webhook",
status=204,
body="",
)
# Start pipeline and go to setup page.
self.client.get(self.setup_path)
# Submit credentials
data = {
"url": "https://bitbucket.example.com/",
"verify_ssl": False,
"consumer_key": "a-very-long-consumer-key-that-when-combined-with-host-would-overflow",
"private_key": EXAMPLE_PRIVATE_KEY,
}
resp = self.client.post(self.setup_path, data=data)
assert resp.status_code == 302
redirect = (
"https://bitbucket.example.com/plugins/servlet/oauth/authorize?oauth_token=abc123"
)
assert redirect == resp["Location"]
resp = self.client.get(self.setup_path + "?oauth_token=xyz789")
assert resp.status_code == 200
integration = Integration.objects.get(provider="bitbucket_server")
assert (
integration.external_id
== "bitbucket.example.com:a-very-long-consumer-key-that-when-combine"
)
def test_source_url_matches(self) -> None:
installation = self.integration.get_installation(self.organization.id)
test_cases = [
(
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=main",
True,
),
(
"https://notbitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=main",
False,
),
(
"https://jianyuan.io",
False,
),
]
for source_url, matches in test_cases:
assert installation.source_url_matches(source_url) == matches
def test_format_source_url(self) -> None:
installation = self.integration.get_installation(self.organization.id)
with assume_test_silo_mode(SiloMode.REGION):
repo = Repository.objects.create(
organization_id=self.organization.id,
name="TEST/sentry",
url="https://bitbucket.example.com/projects/TEST/repos/sentry/browse",
provider=self.provider.key,
external_id=123,
config={"name": "TEST/sentry", "project": "TEST", "repo": "sentry"},
integration_id=self.integration.id,
)
assert (
installation.format_source_url(
repo, "src/sentry/integrations/bitbucket_server/integration.py", None
)
== "https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py"
)
assert (
installation.format_source_url(
repo, "src/sentry/integrations/bitbucket_server/integration.py", "main"
)
== "https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=main"
)
def test_extract_branch_from_source_url(self) -> None:
installation = self.integration.get_installation(self.organization.id)
with assume_test_silo_mode(SiloMode.REGION):
repo = Repository.objects.create(
organization_id=self.organization.id,
name="TEST/sentry",
url="https://bitbucket.example.com/projects/TEST/repos/sentry/browse",
provider=self.provider.key,
external_id=123,
config={"name": "TEST/sentry", "project": "TEST", "repo": "sentry"},
integration_id=self.integration.id,
)
# ?at=main
assert (
installation.extract_branch_from_source_url(
repo,
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=main",
)
== "main"
)
# ?at=refs/heads/main
assert (
installation.extract_branch_from_source_url(
repo,
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=refs%2Fheads%2Fmain",
)
== "main"
)
def test_extract_source_path_from_source_url(self) -> None:
installation = self.integration.get_installation(self.organization.id)
with assume_test_silo_mode(SiloMode.REGION):
repo = Repository.objects.create(
organization_id=self.organization.id,
name="TEST/sentry",
url="https://bitbucket.example.com/projects/TEST/repos/sentry/browse",
provider=self.provider.key,
external_id=123,
config={"name": "TEST/sentry", "project": "TEST", "repo": "sentry"},
integration_id=self.integration.id,
)
test_cases = [
(
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py",
"src/sentry/integrations/bitbucket_server/integration.py",
),
(
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=main",
"src/sentry/integrations/bitbucket_server/integration.py",
),
(
"https://bitbucket.example.com/projects/TEST/repos/sentry/browse/src/sentry/integrations/bitbucket_server/integration.py?at=refs%2Fheads%2Fmain",
"src/sentry/integrations/bitbucket_server/integration.py",
),
]
for source_url, expected in test_cases:
assert installation.extract_source_path_from_source_url(repo, source_url) == expected
| BitbucketServerIntegrationTest |
python | python__mypy | mypy/typeanal.py | {
"start": 105376,
"end": 109902
} | class ____(TypeQuery[list[Type]]):
def query_types(self, types: Iterable[Type]) -> list[Type]:
return self.strategy([t.accept(self) for t in types]) + list(types)
def strategy(self, items: Iterable[list[Type]]) -> list[Type]:
return list(itertools.chain.from_iterable(items))
def make_optional_type(t: Type) -> Type:
"""Return the type corresponding to Optional[t].
Note that we can't use normal union simplification, since this function
is called during semantic analysis and simplification only works during
type checking.
"""
if isinstance(t, ProperType) and isinstance(t, NoneType):
return t
elif isinstance(t, ProperType) and isinstance(t, UnionType):
# Eagerly expanding aliases is not safe during semantic analysis.
items = [item for item in t.items if not isinstance(get_proper_type(item), NoneType)]
return UnionType(items + [NoneType()], t.line, t.column)
else:
return UnionType([t, NoneType()], t.line, t.column)
def validate_instance(t: Instance, fail: MsgCallback, empty_tuple_index: bool) -> bool:
"""Check if this is a well-formed instance with respect to argument count/positions."""
# TODO: combine logic with instantiate_type_alias().
if any(unknown_unpack(a) for a in t.args):
# This type is not ready to be validated, because of unknown total count.
# TODO: is it OK to fill with TypeOfAny.from_error instead of special form?
return False
if t.type.has_type_var_tuple_type:
min_tv_count = sum(
not tv.has_default() and not isinstance(tv, TypeVarTupleType)
for tv in t.type.defn.type_vars
)
correct = len(t.args) >= min_tv_count
if any(
isinstance(a, UnpackType) and isinstance(get_proper_type(a.type), Instance)
for a in t.args
):
correct = True
if not t.args:
if not (empty_tuple_index and len(t.type.type_vars) == 1):
# The Any arguments should be set by the caller.
if empty_tuple_index and min_tv_count:
fail(
f"At least {min_tv_count} type argument(s) expected, none given",
t,
code=codes.TYPE_ARG,
)
return False
elif not correct:
fail(
f"Bad number of arguments, expected: at least {min_tv_count}, given: {len(t.args)}",
t,
code=codes.TYPE_ARG,
)
return False
else:
# We also need to check if we are not performing a type variable tuple split.
unpack = find_unpack_in_list(t.args)
if unpack is not None:
unpack_arg = t.args[unpack]
assert isinstance(unpack_arg, UnpackType)
if isinstance(unpack_arg.type, TypeVarTupleType):
assert t.type.type_var_tuple_prefix is not None
assert t.type.type_var_tuple_suffix is not None
exp_prefix = t.type.type_var_tuple_prefix
act_prefix = unpack
exp_suffix = t.type.type_var_tuple_suffix
act_suffix = len(t.args) - unpack - 1
if act_prefix < exp_prefix or act_suffix < exp_suffix:
fail("TypeVarTuple cannot be split", t, code=codes.TYPE_ARG)
return False
elif any(isinstance(a, UnpackType) for a in t.args):
# A variadic unpack in fixed size instance (fixed unpacks must be flattened by the caller)
fail(message_registry.INVALID_UNPACK_POSITION, t, code=codes.VALID_TYPE)
t.args = ()
return False
elif len(t.args) != len(t.type.type_vars):
# Invalid number of type parameters.
arg_count = len(t.args)
min_tv_count = sum(not tv.has_default() for tv in t.type.defn.type_vars)
max_tv_count = len(t.type.type_vars)
if arg_count and (arg_count < min_tv_count or arg_count > max_tv_count):
fail(
wrong_type_arg_count(min_tv_count, max_tv_count, str(arg_count), t.type.name),
t,
code=codes.TYPE_ARG,
)
t.invalid = True
return False
return True
def find_self_type(typ: Type, lookup: Callable[[str], SymbolTableNode | None]) -> bool:
return typ.accept(HasSelfType(lookup))
| CollectAllInnerTypesQuery |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 17686,
"end": 20482
} | class ____(DagsterType):
"""Define a type in dagster whose typecheck is an isinstance check.
Specifically, the type can either be a single python type (e.g. int),
or a tuple of types (e.g. (int, float)) which is treated as a union.
Examples:
.. code-block:: python
ntype = PythonObjectDagsterType(python_type=int)
assert ntype.name == 'int'
assert_success(ntype, 1)
assert_failure(ntype, 'a')
.. code-block:: python
ntype = PythonObjectDagsterType(python_type=(int, float))
assert ntype.name == 'Union[int, float]'
assert_success(ntype, 1)
assert_success(ntype, 1.5)
assert_failure(ntype, 'a')
Args:
python_type (Union[Type, Tuple[Type, ...]): The dagster typecheck function calls instanceof on
this type.
name (Optional[str]): Name the type. Defaults to the name of ``python_type``.
key (Optional[str]): Key of the type. Defaults to name.
description (Optional[str]): A markdown-formatted string, displayed in tooling.
loader (Optional[DagsterTypeLoader]): An instance of a class that
inherits from :py:class:`~dagster.DagsterTypeLoader` and can map config data to a value of
this type. Specify this argument if you will need to shim values of this type using the
config machinery. As a rule, you should use the
:py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct
these arguments.
"""
def __init__(
self,
python_type: t.Union[t.Type, t.Tuple[t.Type, ...]],
key: t.Optional[str] = None,
name: t.Optional[str] = None,
**kwargs,
):
if isinstance(python_type, tuple):
self.python_type = check.tuple_param(
python_type, "python_type", of_shape=tuple(type for item in python_type)
)
self.type_str = "Union[{}]".format(
", ".join(python_type.__name__ for python_type in python_type)
)
typing_type = t.Union[python_type] # pyright: ignore[reportInvalidTypeArguments]
else:
self.python_type = check.class_param(python_type, "python_type")
self.type_str = cast(str, python_type.__name__)
typing_type = self.python_type
name = check.opt_str_param(name, "name", self.type_str)
key = check.opt_str_param(key, "key", name)
super(PythonObjectDagsterType, self).__init__(
key=key,
name=name,
type_check_fn=isinstance_type_check_fn(python_type, name, self.type_str),
typing_type=typing_type,
**kwargs,
)
| PythonObjectDagsterType |
python | neetcode-gh__leetcode | python/0231-power-of-two.py | {
"start": 300,
"end": 406
} | class ____:
def isPowerOfTwo(self, n: int) -> bool:
return n > 0 and ((1 << 30) % n) == 0
| Solution |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 5894,
"end": 6454
} | class ____(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
yield r
raise LookupError
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
yield {"processed": [method]}
| GeneratorFailMiddleware |
python | django__django | django/http/response.py | {
"start": 24137,
"end": 24203
} | class ____(HttpResponse):
status_code = 404
| HttpResponseNotFound |
python | google__jax | tests/pallas/gpu_pallas_distributed_test.py | {
"start": 10739,
"end": 24777
} | class ____(TestCase):
def _get_reduction_impl(self, reduction):
match reduction:
case "add":
return jnp.add
case "min":
return jnp.minimum
case "max":
return jnp.maximum
case "and":
return jnp.bitwise_and
case "or":
return jnp.bitwise_or
case "xor":
return jnp.bitwise_xor
case _:
raise ValueError(reduction)
def test_multimem_store_regs(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, sem):
@pl.when(lax.axis_index('x') == 0)
def _store():
output = plgpu.layout_cast(lax.broadcasted_iota(jnp.int32, (128, 128), 1), plgpu.Layout.WGMMA)
plgpu.multimem_store(output, y_ref, 'x')
other_dev_id = 1 - lax.axis_index('x')
pl.semaphore_signal(sem, 1, device_id=other_dev_id)
pl.semaphore_wait(sem)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.int32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
mesh = jax.sharding.Mesh(jax.devices(), ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P("x"), check_vma=False,
)
)()
y = multihost_utils.process_allgather(y, tiled=True)
ref = lax.broadcasted_iota(jnp.int32, (128, 128), 1)
np.testing.assert_array_equal(y, np.concat([ref, ref], axis=0))
def test_multimem_store_tma(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, smem_ref, sem):
@pl.when(lax.axis_index('x') == 0)
def _store():
output = plgpu.layout_cast(lax.broadcasted_iota(jnp.int32, (128, 128), 1), plgpu.Layout.WGMMA)
smem_ref[...] = output
plgpu.copy_smem_to_gmem(smem_ref, plgpu.multicast_ref(y_ref, 'x'))
plgpu.wait_smem_to_gmem(0)
other_dev_id = 1 - lax.axis_index('x')
pl.semaphore_signal(sem, 1, device_id=other_dev_id)
pl.semaphore_wait(sem)
transforms = (plgpu.TilingTransform((8, 32)), plgpu.SwizzleTransform(128))
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.int32),
scratch_shapes=[
plgpu.SMEM((128, 128), jnp.int32, transforms=transforms),
plgpu.SemaphoreType.REGULAR,
],
)
mesh = jax.sharding.Mesh(jax.devices(), ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P("x"), check_vma=False,
)
)()
y = multihost_utils.process_allgather(y, tiled=True)
ref = lax.broadcasted_iota(jnp.int32, (128, 128), 1)
np.testing.assert_array_equal(y, np.concat([ref, ref], axis=0))
@parameterized.parameters(
(jnp.int32, 1, "add"),
(jnp.int32, 1, "min"),
(jnp.int32, 1, "max"),
(jnp.int32, 1, "and"),
(jnp.int32, 1, "or"),
(jnp.int32, 1, "xor"),
(jnp.float32, 1, "add"),
(jnp.float32, 2, "add", True),
(jnp.float32, 4, "add"),
(jnp.float16, 2, "add"),
(jnp.float16, 2, "min"),
(jnp.float16, 4, "max"),
(jnp.float16, 8, "add", True),
(jnp.bfloat16, 2, "max"),
(jnp.bfloat16, 8, "add"),
(jnp.float8_e5m2, 4, "add"),
(jnp.float8_e5m2, 8, "min"),
(jnp.float8_e5m2, 16, "max", True),
(jnp.float8_e4m3fn, 4, "min", True),
(jnp.float8_e4m3fn, 8, "max"),
(jnp.float8_e4m3fn, 16, "add"),
)
def test_multimem_load_reduce(self, dtype, vector_length, reduction, tiled_layout=False):
if dtype in (
jnp.float8_e5m2,
jnp.float8_e4m3fn,
) and not jtu.is_cuda_compute_capability_at_least("10.0"):
self.skipTest("Only works on GPU with capability >= sm100")
if jax.process_index() > 2:
return # Only 2 processes needed.
devices = jax.devices()[:2]
def kernel(x_ref, y_ref, _, sem_ref):
if tiled_layout:
layout = plgpu.Layout.TILED(
plgpu.Tiling(
(
(64, 2 * vector_length),
(16, 2 * vector_length),
(vector_length,),
)
),
warp_dims=(-5,),
lane_dims=(-3, -2),
vector_dim=-1,
)
else:
layout = plgpu.Layout.WG_STRIDED((64, 32), vec_size=vector_length)
y_ref[...] = plgpu.layout_cast(
plgpu.multimem_load_reduce(
x_ref.at[16:-16], collective_axes="x", reduction_op=reduction,
),
layout
)
my_device = lax.axis_index("x")
other_device = 1 - my_device
pl.semaphore_signal(sem_ref, 1, device_id=other_device)
pl.semaphore_wait(sem_ref)
# The rounding we see in low precision types seems to be different from
# what JAX/XLA use.
match jnp.dtype(dtype).itemsize:
case 4:
bound = 800000
case 2:
bound = 128
case 1:
bound = 4
case _:
raise ValueError(f"Unsupported dtype: {dtype}")
x_local = jax.random.randint(
jax.random.key(1234), (128 + 64, 32), dtype=jnp.int32, minval=-bound, maxval=bound,
).astype(dtype)
mesh = jax.sharding.Mesh(devices, ("x",))
x_shard = jax.ShapeDtypeStruct((64 + 32, 32), dtype)
y_shape = jax.ShapeDtypeStruct((64, 32), dtype)
y, _ = jax.jit(
jax.shard_map(
pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=[
pl.BlockSpec(memory_space=plgpu.SMEM),
pl.BlockSpec(memory_space=plgpu.GMEM),
],
out_shape=(y_shape, x_shard),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
# TODO(b/448323639): Without aliasing XLA doesn't actually
# insert the copy that puts the operand in symmetric memory,
# which causes the kernel to crash.
input_output_aliases={0: 1},
),
mesh=mesh,
in_specs=P("x"),
out_specs=P("x"), # Not really, but lets us test.
check_vma=False,
)
)(x_local)
y = multihost_utils.process_allgather(y, tiled=True)
np_reduction = self._get_reduction_impl(reduction)
np.testing.assert_array_equal(
y.astype(jnp.float32),
np.tile(np_reduction(x_local[16:64+16], x_local[64+48:128+48]), (2, 1)),
)
def _test_reduce_scatter(
self,
shape,
dtype,
reduction,
scatter_dimension=0,
tile_size=None,
vec_size=None,
num_blocks=None,
):
if jax.process_index() > 2:
return
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ["x"])
if jnp.issubdtype(dtype, jnp.floating):
x = jax.random.uniform(jax.random.key(42), shape, dtype=dtype, minval=-1.0, maxval=1.0)
else:
x = jax.random.randint(jax.random.key(42), shape, dtype=dtype, minval=-1000, maxval=1000)
def body(x):
return reduce_scatter(
x,
axis_name="x",
scatter_dimension=scatter_dimension,
reduction=reduction,
vec_size=vec_size,
tile_size=tile_size,
num_blocks=num_blocks,
)
spec = P(*([None] * scatter_dimension), "x")
y = jax.jit(
jax.shard_map(
body, mesh=mesh, in_specs=spec, out_specs=spec, check_vma=False
)
)(x)
y = multihost_utils.process_allgather(y, tiled=True)
np_reduction = self._get_reduction_impl(reduction)
split_idx = x.shape[scatter_dimension] // 2
slices_first = [slice(None)] * len(shape)
slices_first[scatter_dimension] = slice(None, split_idx)
slices_second = [slice(None)] * len(shape)
slices_second[scatter_dimension] = slice(split_idx, None)
expected = np_reduction(x[tuple(slices_first)], x[tuple(slices_second)])
tol = 1e-5 if reduction == "add" else 0
np.testing.assert_allclose(y, expected, rtol=tol, atol=tol)
@parameterized.parameters(
(jnp.float32, "add", 1),
(jnp.float16, "add", 2),
(jnp.bfloat16, "add", 2),
(jnp.float16, "min", 4),
(jnp.float16, "max", 8),
(jnp.int32, "add", 1),
)
def test_reduce_scatter(self, dtype, reduction, vec_size):
# 16 rows * 64 cols = 1024 elements = 8 elements per thread
self._test_reduce_scatter(
(1024, 64), dtype, reduction, tile_size=1024, vec_size=vec_size, num_blocks=4
)
def test_reduce_scatter_large_minor_dims(self):
self._test_reduce_scatter(
(512, 32768), jnp.float16, "add", tile_size=8192, vec_size=4, num_blocks=4
)
@parameterized.parameters(2048, 256, None)
def test_reduce_scatter_auto_vec_size(self, tile_size):
self._test_reduce_scatter(
(1024, 64), jnp.float16, "add", tile_size=tile_size, vec_size=None, num_blocks=4
)
@parameterized.parameters(2048, 256, None)
def test_reduce_scatter_auto_vec_size_int(self, tile_size):
self._test_reduce_scatter(
(1024, 64), jnp.int32, "add", tile_size=tile_size, vec_size=None, num_blocks=4
)
@parameterized.parameters(1, 2)
def test_reduce_scatter_different_axes(self, axis):
if axis == 1:
shape = (64, 1024, 32)
tile_size = 2048
else: # axis == 2
shape = (32, 64, 1024)
tile_size = 2048
self._test_reduce_scatter(
shape, jnp.float16, "add", scatter_dimension=axis, tile_size=tile_size, vec_size=None, num_blocks=4
)
@parameterized.parameters(
(jnp.float16, "add"),
(jnp.float32, "add"),
(jnp.bfloat16, "max"),
)
def test_all_reduce(self, dtype, reduction):
"""Test all-reduce functionality when scatter_dimension=None."""
self._test_all_reduce(
(1024, 1024), dtype, reduction, tile_size=512, vec_size=None, num_blocks=4
)
def _test_all_reduce(
self,
shape,
dtype,
reduction,
tile_size=None,
vec_size=None,
num_blocks=None,
):
"""Helper function to test all-reduce functionality."""
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
x = jax.random.normal(jax.random.key(42), (2, *shape), dtype)
def body(x):
return reduce_scatter(
x,
axis_name="x",
scatter_dimension=None, # All-reduce mode
reduction=reduction,
vec_size=vec_size,
tile_size=tile_size,
num_blocks=num_blocks,
)
spec = P("x")
y = jax.jit(
jax.shard_map(
body, mesh=mesh, in_specs=spec, out_specs=spec, check_vma=False
)
)(x)
y = multihost_utils.process_allgather(y, tiled=True)
np_reduction = self._get_reduction_impl(reduction)
expected = np_reduction(x[0], x[1])
tol = 1e-5 if reduction == "add" else 0
for ys in y:
# It seems that the rounding used by the switch is different from what
# XLA uses.
y_rounded = np.nextafter(ys, expected)
np.testing.assert_allclose(y_rounded, expected, rtol=tol, atol=tol)
def _test_all_gather(
self,
shape,
dtype,
gather_dimension=0,
tile_size=None,
vec_size=None,
num_blocks=None,
):
if jax.process_index() > 2:
return
if jnp.issubdtype(dtype, jnp.floating):
x = jax.random.uniform(jax.random.key(42), shape, dtype=dtype, minval=-1.0, maxval=1.0)
else:
x = jax.random.randint(jax.random.key(42), shape, dtype=dtype, minval=-1000, maxval=1000)
def body(x):
return all_gather(
x,
axis_name="x",
gather_dimension=gather_dimension,
vec_size=vec_size,
tile_size=tile_size,
num_blocks=num_blocks,
)
spec = P(*([None] * gather_dimension), "x")
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ["x"])
y = jax.jit(
jax.shard_map(
body, mesh=mesh, in_specs=spec, out_specs=spec, check_vma=False
)
)(x)
y = multihost_utils.process_allgather(y, tiled=True)
repeats = [1] * len(x.shape)
repeats[gather_dimension] = 2
np.testing.assert_array_equal(y, np.tile(x, repeats))
@parameterized.parameters(
(jnp.float32, 1),
(jnp.float16, 2),
(jnp.bfloat16, 2),
(jnp.float16, 4),
(jnp.float16, 8),
(jnp.int32, 1),
)
def test_all_gather(self, dtype, vec_size):
# 16 rows * 64 cols = 1024 elements = 8 elements per thread
self._test_all_gather(
(1024, 64), dtype, tile_size=1024, vec_size=vec_size, num_blocks=4
)
def test_all_gather_large_minor_dims(self):
self._test_all_gather(
(512, 32768), jnp.float16, tile_size=8192, vec_size=4, num_blocks=4
)
@parameterized.parameters(2048, 256, None)
def test_all_gather_auto_vec_size(self, tile_size):
self._test_all_gather(
(1024, 64), jnp.float16, tile_size=tile_size, vec_size=None, num_blocks=4
)
@parameterized.parameters(2048, 256, None)
def test_all_gather_auto_vec_size_int(self, tile_size):
self._test_all_gather(
(1024, 64), jnp.int32, tile_size=tile_size, vec_size=None, num_blocks=4
)
@parameterized.parameters(1, 2)
def test_all_gather_different_axes(self, axis):
if axis == 1:
shape = (64, 1024, 32)
tile_size = 2048
else: # axis == 2
shape = (32, 64, 1024)
tile_size = 2048
self._test_all_gather(
shape, jnp.float16, gather_dimension=axis, tile_size=tile_size, vec_size=None, num_blocks=4
)
if __name__ == '__main__':
# This test doesn't work with the platform allocator, so we override it
# if it's ran alone. If it's part of a larger test suite and the platform
# allocator is used, setUp will skip the test.
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.01'
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'default'
jt_multiprocess.main()
| PallasCallMultimemTest |
python | django-extensions__django-extensions | tests/management/commands/test_sqlcreate.py | {
"start": 873,
"end": 1166
} | class ____(TestCase):
"""Test for sqlcreate exception."""
def test_should_raise_CommandError_if_database_is_unknown(self):
with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
call_command("sqlcreate", "--database=unknown")
| SqlcreateExceptionsTests |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 25331,
"end": 25542
} | class ____:
# GH 2692
def setup(self):
N = 500
self.df = DataFrame({"ii": range(N), "bb": [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby("ii").sum()
| SumBools |
python | geekcomputers__Python | nitkarshchourasia/to_sort/django_projects/ToDo_webapp/todo/models.py | {
"start": 93,
"end": 308
} | class ____(models.Model):
title = models.CharField(max_length=100)
details = models.TextField()
date = models.DateTimeField(default=timezone.now)
def __str__(self) -> str:
return self.title
| Todo |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/engines.py | {
"start": 12322,
"end": 13086
} | class ____:
"""Proxy a DBAPI cursor.
Tests can provide subclasses of this to intercept
DBAPI-level cursor operations.
"""
def __init__(self, engine, conn, *args, **kwargs):
self.engine = engine
self.connection = conn
self.cursor = conn.cursor(*args, **kwargs)
def execute(self, stmt, parameters=None, **kw):
if parameters:
return self.cursor.execute(stmt, parameters, **kw)
else:
return self.cursor.execute(stmt, **kw)
def executemany(self, stmt, params, **kw):
return self.cursor.executemany(stmt, params, **kw)
def __iter__(self):
return iter(self.cursor)
def __getattr__(self, key):
return getattr(self.cursor, key)
| DBAPIProxyCursor |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_not_contain_special_characters.py | {
"start": 1080,
"end": 2975
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference the metric.
condition_metric_name = "column_values.not_contain_special_character"
# condition_value_keys are arguments used to determine the value of the metric.
condition_value_keys = ("allowed_characters",)
# This method defines the business logic for evaluating the metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, allowed_characters: list or set, **kwargs):
def not_contain_special_character(val, *special_characters):
special_characters = [
char for char in special_characters if char not in allowed_characters
]
return all(c not in str(val) for c in special_characters)
return column.apply(not_contain_special_character, args=(list(string.punctuation)))
# This method defines the business logic for evaluating the metric when using a SparkExecutionEngine
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, allowed_characters: list or set, **kwargs):
def not_contain_special_character(val, *special_characters):
special_characters = [
char for char in list(string.punctuation) if char not in allowed_characters
]
return all(c not in str(val) for c in special_characters)
# Register the UDF
not_contain_special_character_udf = F.udf(
not_contain_special_character, types.BooleanType()
)
# Apply the UDF to the column
result_column = F.when(
not_contain_special_character_udf(column, F.lit(string.punctuation)), True
).otherwise(False)
return result_column
# This class defines the Expectation itself
| ColumnValuesToNotContainSpecialCharacters |
python | getsentry__sentry | src/sentry/utils/codecs.py | {
"start": 2288,
"end": 2537
} | class ____(Codec[bytes, bytes]):
def encode(self, value: bytes) -> bytes:
return zstandard.ZstdCompressor().compress(value)
def decode(self, value: bytes) -> bytes:
return zstandard.ZstdDecompressor().decompress(value)
| ZstdCodec |
python | django__django | tests/admin_views/models.py | {
"start": 15669,
"end": 16205
} | class ____(models.Model):
DIFFICULTY_CHOICES = [
("beginner", "Beginner Class"),
("intermediate", "Intermediate Class"),
("advanced", "Advanced Class"),
]
title = models.CharField(max_length=100)
materials = models.FileField(upload_to="test_upload")
difficulty = models.CharField(
max_length=20, choices=DIFFICULTY_CHOICES, null=True, blank=True
)
categories = models.ManyToManyField(Category, blank=True)
start_datetime = models.DateTimeField(null=True, blank=True)
| Course |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/basic.py | {
"start": 10841,
"end": 18924
} | class ____(MetaflowCardComponent):
"""
Properties
page_content : a list of MetaflowCardComponents going as task info
final_component: the dictionary returned by the `render` function of this class.
"""
def __init__(
self,
task,
page_title="Task Info",
only_repr=True,
graph=None,
components=[],
runtime=False,
flow=None,
max_artifact_size=None,
):
self._task = task
self._only_repr = only_repr
# Use the global MAX_ARTIFACT_SIZE constant if not specified
self._max_artifact_size = (
max_artifact_size if max_artifact_size is not None else MAX_ARTIFACT_SIZE
)
self._graph = graph
self._components = components
self._page_title = page_title
self.final_component = None
self.page_component = None
self.runtime = runtime
self.flow = flow
def render(self):
"""
Returns:
a dictionary of form:
dict(metadata = {},components= [])
"""
task_data_dict = TaskToDict(
only_repr=self._only_repr, max_artifact_size=self._max_artifact_size
)(self._task, graph=self._graph)
# ignore the name as an artifact
if "name" in task_data_dict["data"]:
del task_data_dict["data"]["name"]
_metadata = dict(version=1, template="defaultCardTemplate")
# try to parse out metaflow version from tags, but let it go if unset
# e.g. if a run came from a local, un-versioned metaflow codebase
try:
_metadata["metaflow_version"] = [
t for t in self._task.parent.parent.tags if "metaflow_version" in t
][0].split("metaflow_version:")[1]
except Exception:
pass
final_component_dict = dict(
metadata=_metadata,
components=[],
)
metadata = [
"stderr",
"stdout",
"created_at",
"finished_at",
"pathspec",
]
tags = self._task.parent.parent.tags
user_info = [t for t in tags if t.startswith("user:")]
task_metadata_dict = {
"Task Created On": task_data_dict["created_at"],
"Task Finished On": task_data_dict["finished_at"],
# Remove Microseconds from timedelta
"Tags": ", ".join(tags),
"Attempt": self._task.current_attempt,
}
if not self.runtime:
task_metadata_dict["Task Duration"] = str(
self._task.finished_at - self._task.created_at
).split(".")[0]
if len(user_info) > 0:
task_metadata_dict["User"] = user_info[0].split("user:")[1]
for m in metadata:
final_component_dict["metadata"][m] = task_data_dict[m]
metadata_table = SectionComponent(
title="Task Metadata",
contents=[
TableComponent(
headers=list(task_metadata_dict.keys()),
data=[list(task_metadata_dict.values())],
vertical=True,
)
],
)
img_components = []
for img_name in task_data_dict["images"]:
img_components.append(
ImageComponent(
src=task_data_dict["images"][img_name], label=img_name
).render()
)
table_comps = []
for tabname in task_data_dict["tables"]:
tab_dict = task_data_dict["tables"][tabname]
tab_title = "Artifact Name: %s" % tabname
sec_tab_comp = [
TableComponent(headers=tab_dict["headers"], data=tab_dict["data"])
]
post_table_md = None
if tab_dict["truncated"]:
tab_title = "Artifact Name: %s (%d columns and %d rows)" % (
tabname,
tab_dict["full_size"][1],
tab_dict["full_size"][0],
)
post_table_md = MarkdownComponent(
"_Truncated - %d rows not shown_"
% ((tab_dict["full_size"][0] - len(tab_dict["data"])))
)
if post_table_md:
sec_tab_comp.append(post_table_md)
table_comps.append(
SectionComponent(
title=tab_title,
contents=sec_tab_comp,
)
)
# ignore the name as a parameter
if "_parameters" not in self._task.parent.parent:
# In case of spin steps, there is no _parameters task
param_ids = []
else:
param_ids = [
p.id
for p in self._task.parent.parent["_parameters"].task
if p.id != "name"
]
if len(param_ids) > 0:
# Extract parameter from the Parameter Task. That is less brittle.
parameter_data = TaskToDict(
only_repr=self._only_repr, runtime=self.runtime
)(self._task.parent.parent["_parameters"].task, graph=self._graph)
param_component = ArtifactsComponent(
data=[parameter_data["data"][pid] for pid in param_ids]
)
else:
param_component = TitleComponent(text="No Parameters")
parameter_table = SectionComponent(
title="Flow Parameters",
contents=[param_component],
).render()
step_func = getattr(self.flow, self._task.parent.id)
code_table = SectionComponent(
title="Task Code",
contents=[
TableComponent(
data=[[PythonCodeComponent(inspect.getsource(step_func)).render()]]
)
],
).render()
# Don't include parameter ids + "name" in the task artifacts
artifactlist = [
task_data_dict["data"][k]
for k in task_data_dict["data"]
if k not in param_ids
]
if len(artifactlist) > 0:
artifact_component = ArtifactsComponent(data=artifactlist).render()
else:
artifact_component = TitleComponent(text="No Artifacts")
artifact_section = SectionComponent(
title="Artifacts", contents=[artifact_component]
).render()
dag_component = SectionComponent(
title="DAG", contents=[DagComponent(data=task_data_dict["graph"]).render()]
).render()
page_contents = []
if len(self._components) > 0:
page_contents.extend(self._components)
page_contents.extend(
[
metadata_table,
code_table,
parameter_table,
artifact_section,
]
)
if len(table_comps) > 0:
table_section = SectionComponent(
title="Tabular Data", contents=table_comps
).render()
page_contents.append(table_section)
if len(img_components) > 0:
img_section = SectionComponent(
title="Image Data",
columns=len(img_components),
contents=img_components,
).render()
page_contents.append(img_section)
page_contents.append(dag_component)
page_component = PageComponent(
title=self._page_title,
contents=page_contents,
).render()
final_component_dict["components"].append(
TitleComponent(text=task_data_dict["pathspec"]).render()
)
final_component_dict["components"].append(page_component)
# These Properties will provide a way to access these components
# once render is finished
# this will Make this object reusable for run level cards.
self.final_component = final_component_dict
self.page_component = page_component
return final_component_dict
| TaskInfoComponent |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/selection.py | {
"start": 619,
"end": 1274
} | class ____:
"""
State of the current selection.
:param original_cursor_position: int
:param type: :class:`~.SelectionType`
"""
def __init__(
self,
original_cursor_position: int = 0,
type: SelectionType = SelectionType.CHARACTERS,
) -> None:
self.original_cursor_position = original_cursor_position
self.type = type
self.shift_mode = False
def enter_shift_mode(self) -> None:
self.shift_mode = True
def __repr__(self) -> str:
return f"{self.__class__.__name__}(original_cursor_position={self.original_cursor_position!r}, type={self.type!r})"
| SelectionState |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 149870,
"end": 154346
} | class ____(Request):
"""
Get histogram data of all the vector metrics and variants in the task
:param task: Task ID
:type task: str
:param samples: The amount of histogram points to return (0 to return all the
points). Optional, the default value is 6000.
:type samples: int
:param key: Histogram x axis to use: iter - iteration number iso_time - event
time as ISO formatted string timestamp - event timestamp as milliseconds since epoch
:type key: ScalarKeyEnum
:param metrics: List of metrics and variants
:type metrics: Sequence[MetricVariants]
"""
_service = "events"
_action = "scalar_metrics_iter_histogram"
_version = "2.20"
_schema = {
"definitions": {
"metric_variants": {
"metric": {"description": "The metric name", "type": "string"},
"type": "object",
"variants": {
"description": "The names of the metric variants",
"items": {"type": "string"},
"type": "array",
},
},
"scalar_key_enum": {
"enum": ["iter", "timestamp", "iso_time"],
"type": "string",
},
},
"properties": {
"key": {
"$ref": "#/definitions/scalar_key_enum",
"description": "Histogram x axis to use:iter - iteration numberiso_time - event time as ISO formatted stringtimestamp - event timestamp as milliseconds since epoch",
},
"metrics": {
"description": "List of metrics and variants",
"items": {"$ref": "#/definitions/metric_variants"},
"type": "array",
},
"samples": {
"description": "The amount of histogram points to return (0 to return all the points). Optional, the default value is 6000.",
"type": "integer",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
samples: Optional[int] = None,
key: Any = None,
metrics: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(ScalarMetricsIterHistogramRequest, self).__init__(**kwargs)
self.task = task
self.samples = samples
self.key = key
self.metrics = metrics
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("samples")
def samples(self) -> Optional[int]:
return self._property_samples
@samples.setter
def samples(self, value: Optional[int]) -> None:
if value is None:
self._property_samples = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "samples", six.integer_types)
self._property_samples = value
@schema_property("key")
def key(self) -> Any:
return self._property_key
@key.setter
def key(self, value: Any) -> None:
if value is None:
self._property_key = None
return
if isinstance(value, six.string_types):
try:
value = ScalarKeyEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "key", enum.Enum)
self._property_key = value
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricVariants.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", MetricVariants, is_array=True)
self._property_metrics = value
| ScalarMetricsIterHistogramRequest |
python | pypa__setuptools | setuptools/depends.py | {
"start": 343,
"end": 5965
} | class ____:
"""A prerequisite to building or installing a distribution"""
def __init__(
self,
name,
requested_version,
module,
homepage: str = '',
attribute=None,
format=None,
) -> None:
if format is None and requested_version is not None:
format = Version
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return f'{self.name}-{self.requested_version}'
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return (
self.attribute is None
or self.format is None
or str(version) != "unknown"
and self.format(version) >= self.requested_version
)
def get_version(
self, paths=None, default: _T | Literal["unknown"] = "unknown"
) -> _T | Literal["unknown"] | None | Any:
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, _p, _i = find_module(self.module, paths)
except ImportError:
return None
if f:
f.close()
return default
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(str(version))
def maybe_close(f):
@contextlib.contextmanager
def empty():
yield
return
if not f:
return empty()
return contextlib.closing(f)
# Some objects are not available on some platforms.
# XXX it'd be better to test assertions about bytecode instead.
if not sys.platform.startswith('java') and sys.platform != 'cli':
def get_module_constant(
module, symbol, default: _T | int = -1, paths=None
) -> _T | int | None | Any:
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (_suffix, _mode, kind) = info = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = _imp.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
imported = _imp.get_module(module, paths, info)
return getattr(imported, symbol, None)
return extract_constant(code, symbol, default)
def extract_constant(
code: CodeType, symbol: str, default: _T | int = -1
) -> _T | int | None | Any:
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
LOAD_CONST = dis.opmap['LOAD_CONST']
const = default
for byte_code in dis.Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
assert arg is not None
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
return None
__all__ += ['get_module_constant', 'extract_constant']
| Require |
python | pytorch__pytorch | test/export/test_upgrader.py | {
"start": 118,
"end": 11344
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
# Register example upgraders dynamically
torch._C._export.register_example_upgraders()
def tearDown(self) -> None:
# Clean up registered upgraders
torch._C._export.deregister_example_upgraders()
def test_nn_module_stack_transformation_from_v0(self):
"""Test that nn_module_stack strings are prepended with 'test_upgrader_' when upgrading from version 0"""
# Create a mock JSON object that simulates version 0 schema
# with nn_module_stack as a string that needs to be upgraded
mock_json = {
"schema_version": {"major": 0, "minor": 0},
"graph_module": {
"graph": {
"nodes": [
{
"target": "aten.add.Tensor",
"inputs": [],
"outputs": [],
"metadata": {
"nn_module_stack": "original_stack_info",
"other_field": "some_value",
},
},
{
"target": "aten.mul.Tensor",
"inputs": [],
"outputs": [],
"metadata": {
"nn_module_stack": "another_stack",
"stack_trace": "some trace",
},
},
]
}
},
}
# Test the upgrader using the Python binding
serialized_json = json.dumps(mock_json)
upgraded_json_str = torch._C._export.upgrade(serialized_json, 2)
upgraded_json = json.loads(upgraded_json_str)
# Verify the schema version was updated (version 0 -> version 2 due to both v0 and v1 upgraders)
self.assertEqual(upgraded_json["schema_version"]["major"], 2)
self.assertEqual(upgraded_json["schema_version"]["minor"], 0)
# Verify nn_module_stack was prepended with "test_upgrader_"
nodes = upgraded_json["graph_module"]["graph"]["nodes"]
# Check first node
first_node_metadata = nodes[0]["metadata"]
nn_stack = first_node_metadata["nn_module_stack"]
self.assertIsInstance(nn_stack, str)
self.assertEqual(nn_stack, "test_upgrader_original_stack_info")
# Other metadata should be unchanged
self.assertEqual(first_node_metadata["other_field"], "some_value")
# Check second node
second_node_metadata = nodes[1]["metadata"]
nn_stack2 = second_node_metadata["nn_module_stack"]
self.assertIsInstance(nn_stack2, str)
self.assertEqual(nn_stack2, "test_upgrader_another_stack")
# Other metadata should be unchanged
self.assertEqual(second_node_metadata["stack_trace"], "some trace")
def test_nn_module_stack_error_handling_invalid_type(self):
"""Test error handling when nn_module_stack is not a string"""
# Test case: nn_module_stack is not a string
mock_json_invalid_type = {
"schema_version": {"major": 0, "minor": 0},
"graph_module": {
"graph": {
"nodes": [
{
"target": "aten.add.Tensor",
"inputs": [],
"outputs": [],
"metadata": {
"nn_module_stack": 42 # Invalid: should be string
},
}
]
}
},
}
with self.assertRaisesRegex(
RuntimeError,
"Error in upgrader 'version_0_upgrader_registered'",
):
serialized_json = json.dumps(mock_json_invalid_type)
torch._C._export.upgrade(serialized_json, 2)
def test_nodes_without_metadata_handled_gracefully(self):
"""Test that nodes without metadata or nn_module_stack are handled gracefully"""
mock_json = {
"schema_version": {"major": 0, "minor": 0},
"graph_module": {
"graph": {
"nodes": [
{
"target": "aten.add.Tensor",
"inputs": [],
"outputs": [],
# No metadata field
},
{
"target": "aten.mul.Tensor",
"inputs": [],
"outputs": [],
"metadata": {
"stack_trace": "some trace"
# No nn_module_stack field
},
},
]
}
},
}
# Should not raise an error
serialized_json = json.dumps(mock_json)
upgraded_json_str = torch._C._export.upgrade(serialized_json, 2)
upgraded_json = json.loads(upgraded_json_str)
# Verify the schema version was updated (version 0 -> version 2 due to both v0 and v1 upgraders)
self.assertEqual(upgraded_json["schema_version"]["major"], 2)
self.assertEqual(upgraded_json["schema_version"]["minor"], 0)
# Verify nodes are unchanged
nodes = upgraded_json["graph_module"]["graph"]["nodes"]
self.assertEqual(len(nodes), 2)
# First node should have no metadata
self.assertNotIn("metadata", nodes[0])
# Second node should have unchanged metadata
self.assertEqual(nodes[1]["metadata"]["stack_trace"], "some trace")
self.assertNotIn("nn_module_stack", nodes[1]["metadata"])
def test_field_renaming_chain_from_v0_complete(self):
"""Test complete field renaming chain from v0: old_test_field -> new_test_field -> new_test_field2"""
mock_json = {
"schema_version": {"major": 0, "minor": 0},
"graph_module": {
"graph": {
"inputs": [],
"outputs": [],
"nodes": [
{
"target": "aten.add.Tensor",
"inputs": [],
"outputs": [],
"metadata": {"nn_module_stack": "test_stack"},
}
],
"old_test_field": "original_value",
"existing_field": "existing_value",
}
},
}
# Test the upgrader using the Python binding
serialized_json = json.dumps(mock_json)
upgraded_json_str = torch._C._export.upgrade(serialized_json, 2)
upgraded_json = json.loads(upgraded_json_str)
# Verify the schema version was updated (version 0 -> version 2 due to both v0 and v1 upgraders)
self.assertEqual(upgraded_json["schema_version"]["major"], 2)
self.assertEqual(upgraded_json["schema_version"]["minor"], 0)
# Verify complete field transformation: old_test_field -> new_test_field -> new_test_field2
graph = upgraded_json["graph_module"]["graph"]
self.assertIn("new_test_field2", graph)
self.assertEqual(graph["new_test_field2"], "original_value")
self.assertNotIn("old_test_field", graph)
self.assertNotIn("new_test_field", graph)
# Verify existing fields are preserved
self.assertEqual(graph["existing_field"], "existing_value")
self.assertIn("inputs", graph)
self.assertIn("outputs", graph)
self.assertIn("nodes", graph)
# Verify the nn_module_stack was also upgraded by the other upgrader
nodes = graph["nodes"]
self.assertEqual(
nodes[0]["metadata"]["nn_module_stack"], "test_upgrader_test_stack"
)
def test_field_renaming_chain_from_v0_missing_field(self):
"""Test that upgraders work gracefully when old_test_field doesn't exist"""
mock_json = {
"schema_version": {"major": 0, "minor": 0},
"graph_module": {
"graph": {
"inputs": [],
"outputs": [],
"nodes": [],
"existing_field": "existing_value",
}
},
}
# Test the upgrader using the Python binding
serialized_json = json.dumps(mock_json)
upgraded_json_str = torch._C._export.upgrade(serialized_json, 2)
upgraded_json = json.loads(upgraded_json_str)
# Verify the schema version was updated (version 0 -> version 2 due to both v0 and v1 upgraders)
self.assertEqual(upgraded_json["schema_version"]["major"], 2)
self.assertEqual(upgraded_json["schema_version"]["minor"], 0)
# Verify no field transformations occurred since old_test_field didn't exist
graph = upgraded_json["graph_module"]["graph"]
self.assertNotIn("new_test_field2", graph)
self.assertNotIn("new_test_field", graph)
self.assertNotIn("old_test_field", graph)
# Verify existing fields are preserved
self.assertEqual(graph["existing_field"], "existing_value")
self.assertIn("inputs", graph)
self.assertIn("outputs", graph)
self.assertIn("nodes", graph)
def test_field_renaming_from_v1_partial_chain(self):
"""Test partial upgrade chain starting from v1: new_test_field -> new_test_field2"""
mock_json = {
"schema_version": {"major": 1, "minor": 0},
"graph_module": {
"graph": {
"inputs": [],
"outputs": [],
"nodes": [],
"new_test_field": "test_value",
"existing_field": "existing_value",
}
},
}
# Test the upgrader using the Python binding
serialized_json = json.dumps(mock_json)
upgraded_json_str = torch._C._export.upgrade(serialized_json, 2)
upgraded_json = json.loads(upgraded_json_str)
# Verify the schema version was updated (version 1 -> version 2 due to v1 upgrader only)
self.assertEqual(upgraded_json["schema_version"]["major"], 2)
self.assertEqual(upgraded_json["schema_version"]["minor"], 0)
# Verify new_test_field was renamed to new_test_field2
graph = upgraded_json["graph_module"]["graph"]
self.assertIn("new_test_field2", graph)
self.assertEqual(graph["new_test_field2"], "test_value")
self.assertNotIn("new_test_field", graph)
# Verify existing fields are preserved
self.assertEqual(graph["existing_field"], "existing_value")
self.assertIn("inputs", graph)
self.assertIn("outputs", graph)
self.assertIn("nodes", graph)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestUpgrader |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/user_defined_protos_pb2_grpc.py | {
"start": 7803,
"end": 8707
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Predict(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/userdefinedprotos.ImageClassificationService/Predict",
user__defined__protos__pb2.ImageData.SerializeToString,
user__defined__protos__pb2.ImageClass.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
# __end__
| ImageClassificationService |
python | pandas-dev__pandas | pandas/_typing.py | {
"start": 8302,
"end": 8595
} | class ____(BaseBuffer, Protocol[AnyStr_contra]):
__module__: str = "pandas.api.typing.aliases"
def write(self, b: AnyStr_contra, /) -> Any:
# for gzip.GzipFile, bz2.BZ2File
...
def flush(self) -> Any:
# for gzip.GzipFile, bz2.BZ2File
...
| WriteBuffer |
python | redis__redis-py | tests/test_cluster.py | {
"start": 10828,
"end": 40344
} | class ____:
"""
Tests for the RedisCluster class
"""
def test_host_port_startup_node(self):
"""
Test that it is possible to use host & port arguments as startup node
args
"""
cluster = get_mocked_redis_client(host=default_host, port=default_port)
assert cluster.get_node(host=default_host, port=default_port) is not None
def test_startup_nodes(self):
"""
Test that it is possible to use startup_nodes
argument to init the cluster
"""
port_1 = 7000
port_2 = 7001
startup_nodes = [
ClusterNode(default_host, port_1),
ClusterNode(default_host, port_2),
]
cluster = get_mocked_redis_client(startup_nodes=startup_nodes)
assert (
cluster.get_node(host=default_host, port=port_1) is not None
and cluster.get_node(host=default_host, port=port_2) is not None
)
def test_empty_startup_nodes(self):
"""
Test that exception is raised when empty providing empty startup_nodes
"""
with pytest.raises(RedisClusterException) as ex:
RedisCluster(startup_nodes=[])
assert str(ex.value).startswith(
"RedisCluster requires at least one node to discover the cluster"
), str_if_bytes(ex.value)
def test_from_url(self, r):
redis_url = f"redis://{default_host}:{default_port}/0"
with patch.object(RedisCluster, "from_url") as from_url:
def from_url_mocked(_url, **_kwargs):
return get_mocked_redis_client(url=_url, **_kwargs)
from_url.side_effect = from_url_mocked
cluster = RedisCluster.from_url(redis_url)
assert cluster.get_node(host=default_host, port=default_port) is not None
def test_execute_command_errors(self, r):
"""
Test that if no key is provided then exception should be raised.
"""
with pytest.raises(RedisClusterException) as ex:
r.execute_command("GET")
assert str(ex.value).startswith(
"No way to dispatch this command to Redis Cluster. Missing key."
)
def test_execute_command_node_flag_primaries(self, r):
"""
Test command execution with nodes flag PRIMARIES
"""
primaries = r.get_primaries()
replicas = r.get_replicas()
mock_all_nodes_resp(r, "PONG")
assert r.ping(target_nodes=RedisCluster.PRIMARIES) is True
for primary in primaries:
conn = primary.redis_connection.connection
assert conn.read_response.called is True
for replica in replicas:
conn = replica.redis_connection.connection
assert conn.read_response.called is not True
def test_execute_command_node_flag_replicas(self, r):
"""
Test command execution with nodes flag REPLICAS
"""
replicas = r.get_replicas()
if not replicas:
r = get_mocked_redis_client(default_host, default_port)
primaries = r.get_primaries()
mock_all_nodes_resp(r, "PONG")
assert r.ping(target_nodes=RedisCluster.REPLICAS) is True
for replica in replicas:
conn = replica.redis_connection.connection
assert conn.read_response.called is True
for primary in primaries:
conn = primary.redis_connection.connection
assert conn.read_response.called is not True
def test_execute_command_node_flag_all_nodes(self, r):
"""
Test command execution with nodes flag ALL_NODES
"""
mock_all_nodes_resp(r, "PONG")
assert r.ping(target_nodes=RedisCluster.ALL_NODES) is True
for node in r.get_nodes():
conn = node.redis_connection.connection
assert conn.read_response.called is True
def test_execute_command_node_flag_random(self, r):
"""
Test command execution with nodes flag RANDOM
"""
mock_all_nodes_resp(r, "PONG")
assert r.ping(target_nodes=RedisCluster.RANDOM) is True
called_count = 0
for node in r.get_nodes():
conn = node.redis_connection.connection
if conn.read_response.called is True:
called_count += 1
assert called_count == 1
def test_execute_command_default_node(self, r):
"""
Test command execution without node flag is being executed on the
default node
"""
def_node = r.get_default_node()
mock_node_resp(def_node, "PONG")
assert r.ping() is True
conn = def_node.redis_connection.connection
assert conn.read_response.called
def test_ask_redirection(self, r):
"""
Test that the server handles ASK response.
At first call it should return a ASK ResponseError that will point
the client to the next server it should talk to.
Important thing to verify is that it tries to talk to the second node.
"""
redirect_node = r.get_nodes()[0]
with patch.object(Redis, "parse_response") as parse_response:
def ask_redirect_effect(connection, *args, **options):
def ok_response(connection, *args, **options):
assert connection.host == redirect_node.host
assert connection.port == redirect_node.port
return "MOCK_OK"
parse_response.side_effect = ok_response
raise AskError(f"12182 {redirect_node.host}:{redirect_node.port}")
parse_response.side_effect = ask_redirect_effect
assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
def test_handling_cluster_failover_to_a_replica(self, r):
# Set the key we'll test for
key = "key"
r.set("key", "value")
primary = r.get_node_from_key(key, replica=False)
assert str_if_bytes(r.get("key")) == "value"
# Get the current output of cluster slots
cluster_slots = primary.redis_connection.execute_command("CLUSTER SLOTS")
replica_host = ""
replica_port = 0
# Replace one of the replicas to be the new primary based on the
# cluster slots output
for slot_range in cluster_slots:
primary_port = slot_range[2][1]
if primary_port == primary.port:
if len(slot_range) <= 3:
# cluster doesn't have a replica, return
return
replica_host = str_if_bytes(slot_range[3][0])
replica_port = slot_range[3][1]
# replace replica and primary in the cluster slots output
tmp_node = slot_range[2]
slot_range[2] = slot_range[3]
slot_range[3] = tmp_node
break
def raise_connection_error():
raise ConnectionError("error")
def mock_execute_command(*_args, **_kwargs):
if _args[0] == "CLUSTER SLOTS":
return cluster_slots
else:
raise Exception("Failed to mock cluster slots")
# Mock connection error for the current primary
mock_node_resp_func(primary, raise_connection_error)
primary.redis_connection.set_retry(Retry(NoBackoff(), 1))
# Mock the cluster slots response for all other nodes
redis_mock_node = Mock()
redis_mock_node.execute_command.side_effect = mock_execute_command
# Mock response value for all other commands
redis_mock_node.parse_response.return_value = "MOCK_OK"
for node in r.get_nodes():
if node.port != primary.port:
node.redis_connection = redis_mock_node
assert r.get(key) == "MOCK_OK"
new_primary = r.get_node_from_key(key, replica=False)
assert new_primary.host == replica_host
assert new_primary.port == replica_port
assert r.get_node(primary.host, primary.port).server_type == REPLICA
def test_moved_redirection(self, request):
"""
Test that the client handles MOVED response.
"""
moved_redirection_helper(request, failover=False)
def test_moved_redirection_after_failover(self, request):
"""
Test that the client handles MOVED response after a failover.
"""
moved_redirection_helper(request, failover=True)
def test_refresh_using_specific_nodes(self, request):
"""
Test making calls on specific nodes when the cluster has failed over to
another node
"""
node_7006 = ClusterNode(host=default_host, port=7006, server_type=PRIMARY)
node_7007 = ClusterNode(host=default_host, port=7007, server_type=PRIMARY)
with patch.object(Redis, "parse_response") as parse_response:
with patch.object(NodesManager, "initialize", autospec=True) as initialize:
with patch.multiple(
Connection, send_command=DEFAULT, connect=DEFAULT, can_read=DEFAULT
) as mocks:
# simulate 7006 as a failed node
def parse_response_mock(connection, command_name, **options):
if connection.port == 7006:
parse_response.failed_calls += 1
raise ClusterDownError(
"CLUSTERDOWN The cluster is "
"down. Use CLUSTER INFO for "
"more information"
)
elif connection.port == 7007:
parse_response.successful_calls += 1
def initialize_mock(self):
# start with all slots mapped to 7006
self.nodes_cache = {node_7006.name: node_7006}
self.default_node = node_7006
self.slots_cache = {}
for i in range(0, 16383):
self.slots_cache[i] = [node_7006]
# After the first connection fails, a reinitialize
# should follow the cluster to 7007
def map_7007(self):
self.nodes_cache = {node_7007.name: node_7007}
self.default_node = node_7007
self.slots_cache = {}
for i in range(0, 16383):
self.slots_cache[i] = [node_7007]
# Change initialize side effect for the second call
initialize.side_effect = map_7007
parse_response.side_effect = parse_response_mock
parse_response.successful_calls = 0
parse_response.failed_calls = 0
initialize.side_effect = initialize_mock
mocks["can_read"].return_value = False
mocks["send_command"].return_value = "MOCK_OK"
mocks["connect"].return_value = None
with patch.object(
CommandsParser, "initialize", autospec=True
) as cmd_parser_initialize:
def cmd_init_mock(self, r):
self.commands = {
"get": {
"name": "get",
"arity": 2,
"flags": ["readonly", "fast"],
"first_key_pos": 1,
"last_key_pos": 1,
"step_count": 1,
}
}
cmd_parser_initialize.side_effect = cmd_init_mock
rc = _get_client(RedisCluster, request, flushdb=False)
assert len(rc.get_nodes()) == 1
assert rc.get_node(node_name=node_7006.name) is not None
rc.get("foo")
# Cluster should now point to 7007, and there should be
# one failed and one successful call
assert len(rc.get_nodes()) == 1
assert rc.get_node(node_name=node_7007.name) is not None
assert rc.get_node(node_name=node_7006.name) is None
assert parse_response.failed_calls == 1
assert parse_response.successful_calls == 1
@pytest.mark.parametrize(
"read_from_replicas,load_balancing_strategy,mocks_srv_ports",
[
(True, None, [7001, 7002, 7001]),
(True, LoadBalancingStrategy.ROUND_ROBIN, [7001, 7002, 7001]),
(True, LoadBalancingStrategy.ROUND_ROBIN_REPLICAS, [7002, 7002, 7002]),
(True, LoadBalancingStrategy.RANDOM_REPLICA, [7002, 7002, 7002]),
(False, LoadBalancingStrategy.ROUND_ROBIN, [7001, 7002, 7001]),
(False, LoadBalancingStrategy.ROUND_ROBIN_REPLICAS, [7002, 7002, 7002]),
(False, LoadBalancingStrategy.RANDOM_REPLICA, [7002, 7002, 7002]),
],
)
def test_reading_with_load_balancing_strategies(
self,
read_from_replicas: bool,
load_balancing_strategy: LoadBalancingStrategy,
mocks_srv_ports: List[int],
):
with patch.multiple(
Connection,
send_command=DEFAULT,
read_response=DEFAULT,
_connect=DEFAULT,
can_read=DEFAULT,
on_connect=DEFAULT,
) as mocks:
with patch.object(Redis, "parse_response") as parse_response:
def parse_response_mock_first(connection, *args, **options):
# Primary
assert connection.port == mocks_srv_ports[0]
parse_response.side_effect = parse_response_mock_second
return "MOCK_OK"
def parse_response_mock_second(connection, *args, **options):
# Replica
assert connection.port == mocks_srv_ports[1]
parse_response.side_effect = parse_response_mock_third
return "MOCK_OK"
def parse_response_mock_third(connection, *args, **options):
# Primary
assert connection.port == mocks_srv_ports[2]
return "MOCK_OK"
# We don't need to create a real cluster connection but we
# do want RedisCluster.on_connect function to get called,
# so we'll mock some of the Connection's functions to allow it
parse_response.side_effect = parse_response_mock_first
mocks["send_command"].return_value = True
mocks["read_response"].return_value = "OK"
mocks["_connect"].return_value = True
mocks["can_read"].return_value = False
mocks["on_connect"].return_value = True
# Create a cluster with reading from replications
read_cluster = get_mocked_redis_client(
host=default_host,
port=default_port,
read_from_replicas=read_from_replicas,
load_balancing_strategy=load_balancing_strategy,
)
assert read_cluster.read_from_replicas is read_from_replicas
assert read_cluster.load_balancing_strategy is load_balancing_strategy
# Check that we read from the slot's nodes in a round robin
# matter.
# 'foo' belongs to slot 12182 and the slot's nodes are:
# [(127.0.0.1,7001,primary), (127.0.0.1,7002,replica)]
read_cluster.get("foo")
read_cluster.get("foo")
read_cluster.get("foo")
expected_calls_list = []
expected_calls_list.append(call("READONLY"))
expected_calls_list.append(call("GET", "foo", keys=["foo"]))
if (
load_balancing_strategy is None
or load_balancing_strategy == LoadBalancingStrategy.ROUND_ROBIN
):
# in the round robin strategy the primary node can also receive read
# requests and this means that there will be second node connected
expected_calls_list.append(call("READONLY"))
expected_calls_list.extend(
[
call("GET", "foo", keys=["foo"]),
call("GET", "foo", keys=["foo"]),
]
)
mocks["send_command"].assert_has_calls(expected_calls_list)
def test_keyslot(self, r):
"""
Test that method will compute correct key in all supported cases
"""
assert r.keyslot("foo") == 12182
assert r.keyslot("{foo}bar") == 12182
assert r.keyslot("{foo}") == 12182
assert r.keyslot(1337) == 4314
assert r.keyslot(125) == r.keyslot(b"125")
assert r.keyslot(125) == r.keyslot("\x31\x32\x35")
assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
assert r.keyslot(1337.1234) == r.keyslot("1337.1234")
assert r.keyslot(1337) == r.keyslot("1337")
assert r.keyslot(b"abc") == r.keyslot("abc")
def test_get_node_name(self):
assert (
get_node_name(default_host, default_port)
== f"{default_host}:{default_port}"
)
def test_all_nodes(self, r):
"""
Set a list of nodes and it should be possible to iterate over all
"""
nodes = [node for node in r.nodes_manager.nodes_cache.values()]
for i, node in enumerate(r.get_nodes()):
assert node in nodes
def test_all_nodes_masters(self, r):
"""
Set a list of nodes with random primaries/replicas config and it shold
be possible to iterate over all of them.
"""
nodes = [
node
for node in r.nodes_manager.nodes_cache.values()
if node.server_type == PRIMARY
]
for node in r.get_primaries():
assert node in nodes
@pytest.mark.parametrize("error", RedisCluster.ERRORS_ALLOW_RETRY)
def test_cluster_down_overreaches_retry_attempts(self, error):
"""
When error that allows retry is thrown, test that we retry executing
the command as many times as configured in cluster_error_retry_attempts
and then raise the exception
"""
with patch.object(RedisCluster, "_execute_command") as execute_command:
def raise_error(target_node, *args, **kwargs):
execute_command.failed_calls += 1
raise error("mocked error")
execute_command.side_effect = raise_error
rc = get_mocked_redis_client(host=default_host, port=default_port)
with pytest.raises(error):
rc.get("bar")
assert execute_command.failed_calls == rc.cluster_error_retry_attempts
def test_user_on_connect_function(self, request):
"""
Test support in passing on_connect function by the user
"""
def on_connect(connection):
assert connection is not None
mock = Mock(side_effect=on_connect)
_get_client(RedisCluster, request, redis_connect_func=mock)
assert mock.called is True
def test_user_connection_pool_timeout(self, request):
"""
Test support in passing timeout value by the user when setting
up a RedisCluster with a BlockingConnectionPool
"""
timeout = 3
client = _get_client(
RedisCluster,
request,
timeout=timeout,
connection_pool_class=redis.BlockingConnectionPool,
)
for _, node_config in client.nodes_manager.startup_nodes.items():
assert node_config.redis_connection.connection_pool.timeout == timeout
def test_set_default_node_success(self, r):
"""
test successful replacement of the default cluster node
"""
default_node = r.get_default_node()
# get a different node
new_def_node = None
for node in r.get_nodes():
if node != default_node:
new_def_node = node
break
assert r.set_default_node(new_def_node) is True
assert r.get_default_node() == new_def_node
def test_set_default_node_failure(self, r):
"""
test failed replacement of the default cluster node
"""
default_node = r.get_default_node()
new_def_node = ClusterNode("1.1.1.1", 1111)
assert r.set_default_node(None) is False
assert r.set_default_node(new_def_node) is False
assert r.get_default_node() == default_node
def test_get_node_from_key(self, r):
"""
Test that get_node_from_key function returns the correct node
"""
key = "bar"
slot = r.keyslot(key)
slot_nodes = r.nodes_manager.slots_cache.get(slot)
primary = slot_nodes[0]
assert r.get_node_from_key(key, replica=False) == primary
replica = r.get_node_from_key(key, replica=True)
if replica is not None:
assert replica.server_type == REPLICA
assert replica in slot_nodes
@skip_if_redis_enterprise()
def test_not_require_full_coverage_cluster_down_error(self, r):
"""
When require_full_coverage is set to False (default client config) and not
all slots are covered, if one of the nodes has 'cluster-require_full_coverage'
config set to 'yes' some key-based commands should throw ClusterDownError
"""
node = r.get_node_from_key("foo")
missing_slot = r.keyslot("foo")
assert r.set("foo", "bar") is True
try:
assert all(r.cluster_delslots(missing_slot))
with pytest.raises(ClusterDownError):
r.exists("foo")
except ResponseError as e:
assert "CLUSTERDOWN" in str(e)
finally:
try:
# Add back the missing slot
assert r.cluster_addslots(node, missing_slot) is True
# Make sure we are not getting ClusterDownError anymore
assert r.exists("foo") == 1
except ResponseError as e:
if f"Slot {missing_slot} is already busy" in str(e):
# It can happen if the test failed to delete this slot
pass
else:
raise e
def test_timeout_error_topology_refresh_reuse_connections(self, r):
"""
By mucking TIMEOUT errors, we'll force the cluster topology to be reinitialized,
and then ensure that only the impacted connection is replaced
"""
node = r.get_node_from_key("key")
r.set("key", "value")
node_conn_origin = {}
for n in r.get_nodes():
node_conn_origin[n.name] = n.redis_connection
real_func = r.get_redis_connection(node).parse_response
class counter:
def __init__(self, val=0):
self.val = int(val)
count = counter(0)
with patch.object(Redis, "parse_response") as parse_response:
def moved_redirect_effect(connection, *args, **options):
# raise a timeout for 5 times so we'll need to reinitialize the topology
if count.val == 4:
parse_response.side_effect = real_func
count.val += 1
raise TimeoutError()
parse_response.side_effect = moved_redirect_effect
assert r.get("key") == b"value"
for node_name, conn in node_conn_origin.items():
if node_name == node.name:
# The old redis connection of the timed out node should have been
# deleted and replaced
assert conn != r.get_redis_connection(node)
else:
# other nodes' redis connection should have been reused during the
# topology refresh
cur_node = r.get_node(node_name=node_name)
assert conn == r.get_redis_connection(cur_node)
def test_cluster_get_set_retry_object(self, request):
retry = Retry(NoBackoff(), 2)
r = _get_client(RedisCluster, request, retry=retry)
assert r.retry.get_retries() == retry.get_retries()
assert isinstance(r.retry._backoff, NoBackoff)
for node in r.get_nodes():
assert node.redis_connection.get_retry().get_retries() == 0
assert isinstance(node.redis_connection.get_retry()._backoff, NoBackoff)
rand_node = r.get_random_node()
existing_conn = rand_node.redis_connection.connection_pool.get_connection()
# Change retry policy
new_retry = Retry(ExponentialBackoff(), 3)
r.set_retry(new_retry)
assert r.retry.get_retries() == new_retry.get_retries()
assert isinstance(r.retry._backoff, ExponentialBackoff)
for node in r.get_nodes():
assert node.redis_connection.get_retry()._retries == 0
assert isinstance(node.redis_connection.get_retry()._backoff, NoBackoff)
assert existing_conn.retry._retries == 0
new_conn = rand_node.redis_connection.connection_pool.get_connection()
assert new_conn.retry._retries == 0
def test_cluster_retry_object(self, r) -> None:
# Test default retry
# FIXME: Workaround for https://github.com/redis/redis-py/issues/3030
host = r.get_default_node().host
# test default retry config
retry = r.retry
assert isinstance(retry, Retry)
assert retry.get_retries() == 3
assert isinstance(retry._backoff, type(ExponentialWithJitterBackoff()))
node1_connection = r.get_node(host, 16379).redis_connection
node2_connection = r.get_node(host, 16380).redis_connection
assert node1_connection.get_retry()._retries == 0
assert node2_connection.get_retry()._retries == 0
# Test custom retry is not applied to nodes
retry = Retry(ExponentialBackoff(10, 5), 5)
rc_custom_retry = RedisCluster(host, 16379, retry=retry)
assert (
rc_custom_retry.get_node(host, 16379)
.redis_connection.get_retry()
.get_retries()
== 0
)
def test_replace_cluster_node(self, r) -> None:
prev_default_node = r.get_default_node()
r.replace_default_node()
assert r.get_default_node() != prev_default_node
r.replace_default_node(prev_default_node)
assert r.get_default_node() == prev_default_node
def test_default_node_is_replaced_after_exception(self, r):
curr_default_node = r.get_default_node()
# CLUSTER NODES command is being executed on the default node
nodes = r.cluster_nodes()
assert "myself" in nodes.get(curr_default_node.name).get("flags")
def raise_connection_error():
raise ConnectionError("error")
# Mock connection error for the default node
mock_node_resp_func(curr_default_node, raise_connection_error)
# Test that the command succeed from a different node
nodes = r.cluster_nodes()
assert "myself" not in nodes.get(curr_default_node.name).get("flags")
assert r.get_default_node() != curr_default_node
def test_address_remap(self, request, master_host):
"""Test that we can create a rediscluster object with
a host-port remapper and map connections through proxy objects
"""
# we remap the first n nodes
offset = 1000
n = 6
hostname, master_port = master_host
ports = [master_port + i for i in range(n)]
def address_remap(address):
# remap first three nodes to our local proxy
# old = host, port
host, port = address
if int(port) in ports:
host, port = "127.0.0.1", int(port) + offset
# print(f"{old} {host, port}")
return host, port
# create the proxies
proxies = [
NodeProxy(("127.0.0.1", port + offset), (hostname, port)) for port in ports
]
for p in proxies:
p.start()
try:
# create cluster:
r = _get_client(
RedisCluster, request, flushdb=False, address_remap=address_remap
)
try:
assert r.ping() is True
assert r.set("byte_string", b"giraffe")
assert r.get("byte_string") == b"giraffe"
finally:
r.close()
finally:
for p in proxies:
p.close()
# verify that the proxies were indeed used
n_used = sum((1 if p.n_connections else 0) for p in proxies)
assert n_used > 1
@pytest.mark.onlycluster
| TestRedisClusterObj |
python | ApeWorX__ape | src/ape/api/providers.py | {
"start": 34886,
"end": 44519
} | class ____(ProviderAPI):
"""
A provider that manages a process, such as for ``ganache``.
"""
PROCESS_WAIT_TIMEOUT: int = 15
background: bool = False
process: Optional[Popen] = None
allow_start: bool = True
is_stopping: bool = False
stdout_queue: Optional[JoinableQueue] = None
stderr_queue: Optional[JoinableQueue] = None
@property
@abstractmethod
def process_name(self) -> str:
"""The name of the process, such as ``Hardhat node``."""
@abstractmethod
def build_command(self) -> list[str]:
"""
Get the command as a list of ``str``.
Subclasses should override and add command arguments if needed.
Returns:
list[str]: The command to pass to ``subprocess.Popen``.
"""
@property
def base_logs_path(self) -> Path:
return self.config_manager.DATA_FOLDER / self.name / "subprocess_output"
@property
def stdout_logs_path(self) -> Path:
return self.base_logs_path / "stdout.log"
@property
def stderr_logs_path(self) -> Path:
return self.base_logs_path / "stderr.log"
@cached_property
def _stdout_logger(self) -> Logger:
return self._get_process_output_logger("stdout", self.stdout_logs_path)
@cached_property
def _stderr_logger(self) -> Logger:
return self._get_process_output_logger("stderr", self.stderr_logs_path)
@property
def connection_id(self) -> Optional[str]:
cmd_id = ",".join(self.build_command())
return f"{self.network_choice}:{cmd_id}"
def _get_process_output_logger(self, name: str, path: Path):
logger = getLogger(f"{self.name}_{name}_subprocessProviderLogger")
path.parent.mkdir(parents=True, exist_ok=True)
if path.is_file():
path.unlink()
path.touch()
handler = FileHandler(str(path))
handler.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
def connect(self):
"""
Start the process and connect to it.
Subclasses handle the connection-related tasks.
"""
if self.is_connected:
raise ProviderError("Cannot connect twice. Call disconnect before connecting again.")
# Always disconnect after,
# unless running tests with `disconnect_providers_after: false`.
disconnect_after = (
self._test_runner is None
or self.config_manager.get_config("test").disconnect_providers_after
)
if disconnect_after:
atexit.register(self._disconnect_atexit)
# Register handlers to ensure atexit handlers are called when Python dies.
def _signal_handler(signum, frame):
atexit._run_exitfuncs()
sys.exit(143 if signum == SIGTERM else 130)
signal(SIGINT, _signal_handler)
signal(SIGTERM, _signal_handler)
def _disconnect_atexit(self):
if self.background:
return
try:
self.disconnect()
except Exception as err:
logger.error(f"Error while disconnecting: {err}")
def disconnect(self):
"""
Stop the process if it exists.
Subclasses override this method to do provider-specific disconnection tasks.
"""
if self.process:
self.stop()
# Delete entry from managed list of running nodes.
self.network_manager.running_nodes.remove_provider(self)
def start(self, timeout: int = 20):
"""Start the process and wait for its RPC to be ready."""
if self.is_connected:
logger.info(f"Connecting to existing '{self.process_name}' process.")
self.process = None # Not managing the process.
elif self.allow_start:
logger.info(f"Starting '{self.process_name}' process.")
pre_exec_fn = _linux_set_death_signal if platform.uname().system == "Linux" else None
self.stderr_queue = JoinableQueue()
self.stdout_queue = JoinableQueue()
if self.background or logger.level > LogLevel.DEBUG:
out_file = DEVNULL
else:
out_file = PIPE
cmd = self.build_command()
process = popen(cmd, preexec_fn=pre_exec_fn, stdout=out_file, stderr=out_file)
self.process = process
spawn(self.produce_stdout_queue)
spawn(self.produce_stderr_queue)
spawn(self.consume_stdout_queue)
spawn(self.consume_stderr_queue)
# Cache the process so we can manage it even if lost.
self.network_manager.running_nodes.cache_provider(self)
with RPCTimeoutError(self, seconds=timeout) as _timeout:
while True:
if self.is_connected:
break
time.sleep(0.1)
_timeout.check()
else:
raise ProviderError("Process not started and cannot connect to existing process.")
def produce_stdout_queue(self):
process = self.process
if self.stdout_queue is None or process is None:
return
stdout = process.stdout
if stdout is None:
return
for line in iter(stdout.readline, b""):
self.stdout_queue.put(line)
time.sleep(0)
def produce_stderr_queue(self):
process = self.process
if self.stderr_queue is None or process is None:
return
stderr = process.stderr
if stderr is None:
return
for line in iter(stderr.readline, b""):
self.stderr_queue.put(line)
time.sleep(0)
def consume_stdout_queue(self):
if self.stdout_queue is None:
return
for line in self.stdout_queue:
output = line.decode("utf8").strip()
logger.debug(output)
self._stdout_logger.debug(output)
if self.stdout_queue is not None:
self.stdout_queue.task_done()
time.sleep(0)
def consume_stderr_queue(self):
if self.stderr_queue is None:
return
for line in self.stderr_queue:
logger.debug(line.decode("utf8").strip())
self._stdout_logger.debug(line)
if self.stderr_queue is not None:
self.stderr_queue.task_done()
time.sleep(0)
def stop(self):
"""Kill the process."""
if not self.process or self.is_stopping:
return
self.is_stopping = True
logger.info(f"Stopping '{self.process_name}' process.")
self._kill_process()
self.is_stopping = False
self.process = None
def _wait_for_popen(self, timeout: int = 30):
if not self.process:
# Mostly just to make mypy happy.
raise SubprocessError("Unable to wait for process. It is not set yet.")
try:
with SubprocessTimeoutError(self, seconds=timeout) as _timeout:
while self.process.poll() is None:
time.sleep(0.1)
_timeout.check()
except SubprocessTimeoutError:
pass
def _kill_process(self):
if platform.uname().system == "Windows":
self._windows_taskkill()
return
warn_prefix = f"Trying to close '{self.process_name}' process."
def _try_close(warn_message):
try:
if self.process:
self.process.send_signal(SIGINT)
self._wait_for_popen(self.PROCESS_WAIT_TIMEOUT)
except KeyboardInterrupt:
logger.warning(warn_message)
try:
if self.process is not None and self.process.poll() is None:
_try_close(f"{warn_prefix}. Press Ctrl+C 1 more times to force quit")
if self.process is not None and self.process.poll() is None:
self.process.kill()
self._wait_for_popen(2)
except KeyboardInterrupt:
if self.process is not None:
self.process.kill()
self.process = None
def _windows_taskkill(self) -> None:
"""
Kills the given process and all child processes using taskkill.exe. Used
for subprocesses started up on Windows which run in a cmd.exe wrapper that
doesn't propagate signals by default (leaving orphaned processes).
"""
process = self.process
if not process:
return
taskkill_bin = shutil.which("taskkill")
if not taskkill_bin:
raise SubprocessError("Could not find taskkill.exe executable.")
proc = Popen(
[
taskkill_bin,
"/F", # forcefully terminate
"/T", # terminate child processes
"/PID",
str(process.pid),
]
)
proc.wait(timeout=self.PROCESS_WAIT_TIMEOUT)
def _linux_set_death_signal():
"""
Automatically sends SIGTERM to child subprocesses when parent process
dies (only usable on Linux).
"""
# from: https://stackoverflow.com/a/43152455/75956
# the first argument, 1, is the flag for PR_SET_PDEATHSIG
# the second argument is what signal to send to child subprocesses
libc = ctypes.CDLL("libc.so.6")
return libc.prctl(1, SIGTERM)
def popen(cmd: list[str], **kwargs):
# Abstracted for testing purposes.
return Popen(cmd, **kwargs)
| SubprocessProvider |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 1600,
"end": 2495
} | class ____(BaseActionValidatorHandler):
provider = Action.Type.SLACK
notify_action_form = SlackNotifyServiceForm
def generate_action_form_data(self) -> dict[str, Any]:
return {
"workspace": self.validated_data["integration_id"],
"channel": self.validated_data["config"]["target_display"],
"channel_id": self.validated_data["config"].get("target_identifier"),
"tags": self.validated_data["data"].get("tags"),
}
def update_action_data(self, cleaned_data: dict[str, Any]) -> dict[str, Any]:
self.validated_data["config"].update(
{
"target_display": cleaned_data["channel"],
"target_identifier": cleaned_data["channel_id"],
}
)
return self.validated_data
@action_validator_registry.register(Action.Type.MSTEAMS)
| SlackActionValidatorHandler |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_bedrock.py | {
"start": 3433,
"end": 4787
} | class ____(TestBedrockCustomWaitersBase):
WAITER_NAME = "provisioned_model_throughput_complete"
@pytest.fixture
def mock_get_job(self):
with mock.patch.object(self.client, "get_provisioned_model_throughput") as mock_getter:
yield mock_getter
@pytest.mark.parametrize("state", BedrockProvisionModelThroughputCompletedSensor.SUCCESS_STATES)
def test_model_customization_job_complete(self, state, mock_get_job):
mock_get_job.return_value = {"status": state}
BedrockHook().get_waiter(self.WAITER_NAME).wait(jobIdentifier="job_id")
@pytest.mark.parametrize("state", BedrockProvisionModelThroughputCompletedSensor.FAILURE_STATES)
def test_model_customization_job_failed(self, state, mock_get_job):
mock_get_job.return_value = {"status": state}
with pytest.raises(botocore.exceptions.WaiterError):
BedrockHook().get_waiter(self.WAITER_NAME).wait(jobIdentifier="job_id")
def test_model_customization_job_wait(self, mock_get_job):
wait = {"status": "Creating"}
success = {"status": "InService"}
mock_get_job.side_effect = [wait, wait, success]
BedrockHook().get_waiter(self.WAITER_NAME).wait(
jobIdentifier="job_id", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
| TestProvisionedModelThroughputCompleteWaiter |
python | more-itertools__more-itertools | more_itertools/more.py | {
"start": 164501,
"end": 166576
} | class ____:
"""Wrap a non-concurrent iterator with a lock to enforce sequential access.
Applies a non-reentrant lock around calls to ``__next__``, allowing
iterator and generator instances to be shared by multiple consumer
threads.
"""
__slots__ = ('iterator', 'lock')
def __init__(self, iterable):
self.iterator = iter(iterable)
self.lock = Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.iterator)
def synchronized(func):
"""Wrap an iterator-returning callable to make its iterators thread-safe.
Existing itertools and more-itertools can be wrapped so that their
iterator instances are serialized.
For example, ``itertools.count`` does not make thread-safe instances,
but that is easily fixed with::
atomic_counter = synchronized(itertools.count)
Can also be used as a decorator for generator functions definitions
so that the generator instances are serialized::
@synchronized
def enumerate_and_timestamp(iterable):
for count, value in enumerate(iterable):
yield count, time_ns(), value
"""
@wraps(func)
def inner(*args, **kwargs):
iterator = func(*args, **kwargs)
return serialize(iterator)
return inner
def concurrent_tee(iterable, n=2):
"""Variant of itertools.tee() but with guaranteed threading semantics.
Takes a non-threadsafe iterator as an input and creates concurrent
tee objects for other threads to have reliable independent copies of
the data stream.
The new iterators are only thread-safe if consumed within a single thread.
To share just one of the new iterators across multiple threads, wrap it
with :func:`serialize`.
"""
if n < 0:
raise ValueError
if n == 0:
return ()
iterator = _concurrent_tee(iterable)
result = [iterator]
for _ in range(n - 1):
result.append(_concurrent_tee(iterator))
return tuple(result)
| serialize |
python | pydantic__pydantic | pydantic/_internal/_decorators.py | {
"start": 3487,
"end": 4423
} | class ____:
"""A container for data from `@field_serializer` so that we can access it
while building the pydantic-core schema.
Attributes:
decorator_repr: A class variable representing the decorator string, '@field_serializer'.
fields: A tuple of field names the serializer should be called on.
mode: The proposed serializer mode.
return_type: The type of the serializer's return value.
when_used: The serialization condition. Accepts a string with values `'always'`, `'unless-none'`, `'json'`,
and `'json-unless-none'`.
check_fields: Whether to check that the fields actually exist on the model.
"""
decorator_repr: ClassVar[str] = '@field_serializer'
fields: tuple[str, ...]
mode: Literal['plain', 'wrap']
return_type: Any
when_used: core_schema.WhenUsed
check_fields: bool | None
@dataclass(**slots_true)
| FieldSerializerDecoratorInfo |
python | wandb__wandb | tests/fixtures/wandb_backend_spy/spy.py | {
"start": 178,
"end": 7269
} | class ____:
"""A spy that intercepts interactions with the W&B backend."""
def __init__(self) -> None:
self._lock = threading.Lock()
self._runs: dict[str, _RunData] = {}
self._gql_stubs: list[gql_match.GQLStub] = []
self._filestream_stubs: list[_FileStreamResponse] = []
@contextlib.contextmanager
def freeze(self) -> Iterator[WandbBackendSnapshot]:
"""A context manager in which the spied state can be queried.
Usage:
with wandb_backend_spy.freeze() as snapshot:
history = snapshot.history(run_id=run_id)
assert history[0]["metric"] == "expected_value"
"""
snapshot = WandbBackendSnapshot()
with self._lock:
snapshot._spy = self
try:
yield snapshot
finally:
snapshot._spy = None
# Provide an alias so that tests don't need to import gql_match.py.
gql = gql_match
def stub_gql(
self,
match: gql_match.Matcher,
respond: gql_match.Responder,
) -> None:
"""Stub the GraphQL endpoint.
Later calls to `stub_gql` take precedence. For example, this
responds "b" to the first UpsertBucket call, then "a" to all others:
gql = wandb_backend_spy.gql
matcher = gql.Matcher(operation="UpsertBucket")
wandb_backend_spy.stub_gql(matcher, gql.Constant(content="a"))
wandb_backend_spy.stub_gql(matcher, gql.once(content="b"))
This allows helper fixtures to set defaults for tests.
Args:
match: Which GraphQL requests to intercept.
respond: How to handle matched requests.
"""
with self._lock:
self._gql_stubs.append((match, respond))
def stub_filestream(
self,
body: str | dict[str, Any],
*,
status: int,
n_times: int = 1,
) -> None:
"""Stub the FileStream endpoint.
The next `n_times` requests to FileStream are intercepted and return
the given status and body. Unlike `stub_gql`, this does not allow
selecting specific requests to intercept.
Later calls to `stub_filestream` take precedence.
Args:
body: The request body. If a dict, it is converted to JSON.
status: The HTTP status code to return.
n_times: The number of requests to intercept.
"""
if not isinstance(body, str):
body = json.dumps(body)
with self._lock:
self._filestream_stubs.extend(
[_FileStreamResponse(status=status, body=body)] * n_times
)
def intercept_graphql(self, request_raw: bytes) -> fastapi.Response | None:
"""Intercept a GraphQL request to produce a fake response."""
with self._lock:
stubs = self._gql_stubs
if not stubs:
return None
request = json.loads(request_raw)
query = request.get("query", "")
variables = request.get("variables", {})
for matcher, responder in reversed(stubs):
if not matcher.matches(query, variables):
continue
response = responder.respond(query, variables)
if not response:
continue
return response
return None
def intercept_filestream(self) -> fastapi.Response | None:
"""Intercept a FileStream request to produce a fake response."""
with self._lock:
stubs = self._filestream_stubs
if not stubs:
return None
stub = stubs.pop()
return fastapi.Response(
status_code=stub.status,
content=stub.body,
)
def post_graphql(
self,
request_raw: bytes,
response_raw: bytes,
) -> None:
"""Spy on a GraphQL request and response."""
request = json.loads(request_raw)
with self._lock:
query: str | None = request.get("query")
variables: dict[str, Any] | None = request.get("variables")
if query is None or variables is None:
return
self._spy_upsert_bucket(query, variables)
def _spy_upsert_bucket(
self,
query: str,
variables: dict[str, Any],
) -> None:
"""Change spied state based on UpsertBucket requests.
Requires self._lock.
"""
# NOTE: This is an exact-string match to the query we send.
# It does not depend on the GraphQL schema.
if "mutation UpsertBucket" not in query:
return
# "name" is the usual run ID (part of the run URL),
# and "id" is the "storage ID" which is sometimes used in
# the public API. We use both interchangeably in tests to keep
# usage simple.
if "name" in variables:
run_id = variables["name"]
elif "id" in variables:
run_id = variables["id"]
else:
raise KeyError("Unexpected UpsertBucket without name or id")
run = self._runs.setdefault(run_id, _RunData())
config = variables.get("config")
if config is not None:
run._config_json_string = config
tags = variables.get("tags")
if tags is not None:
run._tags = tags
repo = variables.get("repo")
if repo is not None:
run._remote = repo
commit = variables.get("commit")
if commit is not None:
run._commit = commit
sweep = variables.get("sweep")
if sweep is not None:
run._sweep_name = sweep
summary_metrics = variables.get("summaryMetrics")
if summary_metrics is not None:
# We use the wandb-summary.json file of the FileStream API
# as the source of truth for the run's summary.
summary = run._file_stream_files.setdefault("wandb-summary.json", {})
last_line_offset = max(summary.keys(), default=0)
summary[last_line_offset] = summary_metrics
def post_file_stream(
self,
request_raw: bytes,
response_raw: bytes,
*,
entity: str,
project: str,
run_id: str,
) -> None:
"""Spy on a FileStream request and response."""
request = json.loads(request_raw)
with self._lock:
run = self._runs.setdefault(run_id, _RunData())
run._was_ever_preempting |= request.get("preempting", False)
run._uploaded_files |= set(request.get("uploaded", []))
run._completed = request.get("complete", False)
run._exit_code = request.get("exitcode")
for file_name, file_data in request.get("files", {}).items():
file = run._file_stream_files.setdefault(file_name, {})
offset = file_data["offset"]
for line in file_data["content"]:
file[offset] = line
offset += 1
| WandbBackendSpy |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 10304,
"end": 10404
} | class ____(VyperException):
"""Numeric value out of range for the given type."""
| OverflowException |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 9266,
"end": 9325
} | class ____(Token):
__slots__ = ()
id = ':'
| ValueToken |
python | getsentry__sentry | src/sentry/migrations/0946_add_has_mcp_insights_flag.py | {
"start": 170,
"end": 2859
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0945_move_discover_models"),
]
operations = [
migrations.AlterField(
model_name="project",
name="flags",
field=bitfield.models.BitField(
[
"has_releases",
"has_issue_alerts_targeting",
"has_transactions",
"has_alert_filters",
"has_sessions",
"has_profiles",
"has_replays",
"has_feedbacks",
"has_new_feedbacks",
"spike_protection_error_currently_active",
"spike_protection_transaction_currently_active",
"spike_protection_attachment_currently_active",
"has_minified_stack_trace",
"has_cron_monitors",
"has_cron_checkins",
"has_sourcemaps",
"has_custom_metrics",
"has_high_priority_alerts",
"has_insights_http",
"has_insights_db",
"has_insights_assets",
"has_insights_app_start",
"has_insights_screen_load",
"has_insights_vitals",
"has_insights_caches",
"has_insights_queues",
"has_insights_llm_monitoring",
"has_flags",
"has_insights_agent_monitoring",
"has_insights_mcp",
],
default=10,
),
),
]
| Migration |
python | google__pytype | pytype/rewrite/abstract/classes.py | {
"start": 5270,
"end": 6097
} | class ____(SimpleClass):
"""Class defined in the current module."""
def __init__(
self,
ctx: base.ContextType,
name: str,
members: dict[str, base.BaseValue],
bases: Sequence[SimpleClass],
keywords: Mapping[str, base.BaseValue],
functions: Sequence[functions_lib.InterpreterFunction],
classes: Sequence['InterpreterClass'],
):
super().__init__(ctx, name, members, bases, keywords)
# Functions and classes defined in this class's body. Unlike 'members',
# ignores the effects of post-definition transformations like decorators.
self.functions = functions
self.classes = classes
def __repr__(self):
return f'InterpreterClass({self.name})'
@property
def _attrs(self):
return (self.name, datatypes.immutabledict(self.members))
| InterpreterClass |
python | getsentry__sentry | src/sentry/integrations/github/integration.py | {
"start": 32582,
"end": 34863
} | class ____(PRCommentWorkflow):
organization_option_key = "sentry:github_pr_bot"
referrer = Referrer.GITHUB_PR_COMMENT_BOT
referrer_id = GITHUB_PR_BOT_REFERRER
@staticmethod
def format_comment_subtitle(subtitle: str) -> str:
return subtitle[:47] + "..." if len(subtitle) > 50 else subtitle
@staticmethod
def format_comment_url(url: str, referrer: str) -> str:
return url + "?referrer=" + referrer
def get_comment_body(self, issue_ids: list[int]) -> str:
issues = Group.objects.filter(id__in=issue_ids).order_by("id").all()
issue_list = "\n".join(
[
self.get_merged_pr_single_issue_template(
title=issue.title,
url=self.format_comment_url(issue.get_absolute_url(), self.referrer_id),
environment=self.get_environment_info(issue),
)
for issue in issues
]
)
return MERGED_PR_COMMENT_BODY_TEMPLATE.format(issue_list=issue_list)
def get_comment_data(
self,
organization: Organization,
repo: Repository,
pr: PullRequest,
comment_body: str,
issue_ids: list[int],
) -> dict[str, Any]:
enabled_copilot = features.has("organizations:gen-ai-features", organization)
comment_data: dict[str, Any] = {
"body": comment_body,
}
if enabled_copilot:
comment_data["actions"] = [
{
"name": f"Root cause #{i + 1}",
"type": "copilot-chat",
"prompt": f"@sentry root cause issue {str(issue_id)} with PR URL https://github.com/{repo.name}/pull/{str(pr.key)}",
}
for i, issue_id in enumerate(issue_ids[:3])
]
return comment_data
def process_api_error(e: ApiError) -> list[dict[str, Any]] | None:
if e.json:
message = e.json.get("message", "")
if RATE_LIMITED_MESSAGE in message:
return []
elif "403 Forbidden" in message:
return []
elif e.code == 404 or e.code == 403:
return []
elif isinstance(e, ApiInvalidRequestError):
return []
return None
| GitHubPRCommentWorkflow |
python | django__django | tests/admin_widgets/tests.py | {
"start": 72372,
"end": 76206
} | class ____(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.blues = Band.objects.create(name="Bogey Blues")
self.potatoes = Band.objects.create(name="Green Potatoes")
@screenshot_cases(["desktop_size", "mobile_size", "rtl", "dark", "high_contrast"])
def test_ForeignKey(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_event_add")
)
main_window = self.selenium.current_window_handle
self.take_screenshot("raw_id_widget")
# No value has been selected yet
self.assertEqual(
self.selenium.find_element(By.ID, "id_main_band").get_attribute("value"), ""
)
# Open the popup window and click on a band
self.selenium.find_element(By.ID, "lookup_id_main_band").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues")
self.assertIn(f"/band/{self.blues.pk}/", link.get_attribute("href"))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_main_band", str(self.blues.pk))
# Reopen the popup window and click on another band
self.selenium.find_element(By.ID, "lookup_id_main_band").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes")
self.assertIn(f"/band/{self.potatoes.pk}/", link.get_attribute("href"))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_main_band", str(self.potatoes.pk))
def test_many_to_many(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_event_add")
)
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element(By.ID, "id_supporting_bands").get_attribute(
"value"
),
"",
)
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element(
By.CSS_SELECTOR, ".field-supporting_bands div.help"
).text,
"Supporting Bands.",
)
# Open the popup window and click on a band
self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues")
self.assertIn(f"/band/{self.blues.pk}/", link.get_attribute("href"))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_supporting_bands", str(self.blues.pk))
# Reopen the popup window and click on another band
self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes")
self.assertIn(f"/band/{self.potatoes.pk}/", link.get_attribute("href"))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value(
"#id_supporting_bands", f"{self.blues.pk},{self.potatoes.pk}"
)
| AdminRawIdWidgetSeleniumTests |
python | numpy__numpy | numpy/f2py/_backends/_meson.py | {
"start": 4494,
"end": 8626
} | class ____(Backend):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dependencies = self.extra_dat.get("dependencies", [])
self.meson_build_dir = "bbdir"
self.build_type = (
"debug" if any("debug" in flag for flag in self.fc_flags) else "release"
)
self.fc_flags = _get_flags(self.fc_flags)
def _move_exec_to_root(self, build_dir: Path):
walk_dir = Path(build_dir) / self.meson_build_dir
path_objects = chain(
walk_dir.glob(f"{self.modulename}*.so"),
walk_dir.glob(f"{self.modulename}*.pyd"),
walk_dir.glob(f"{self.modulename}*.dll"),
)
# Same behavior as distutils
# https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293
for path_object in path_objects:
dest_path = Path.cwd() / path_object.name
if dest_path.exists():
dest_path.unlink()
shutil.copy2(path_object, dest_path)
os.remove(path_object)
def write_meson_build(self, build_dir: Path) -> None:
"""Writes the meson build file at specified location"""
meson_template = MesonTemplate(
self.modulename,
self.sources,
self.dependencies,
self.libraries,
self.library_dirs,
self.include_dirs,
self.extra_objects,
self.flib_flags,
self.fc_flags,
self.build_type,
sys.executable,
)
src = meson_template.generate_meson_build()
Path(build_dir).mkdir(parents=True, exist_ok=True)
meson_build_file = Path(build_dir) / "meson.build"
meson_build_file.write_text(src)
return meson_build_file
def _run_subprocess_command(self, command, cwd):
subprocess.run(command, cwd=cwd, check=True)
def run_meson(self, build_dir: Path):
setup_command = ["meson", "setup", self.meson_build_dir]
self._run_subprocess_command(setup_command, build_dir)
compile_command = ["meson", "compile", "-C", self.meson_build_dir]
self._run_subprocess_command(compile_command, build_dir)
def compile(self) -> None:
self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir)
_prepare_objects(self.modulename, self.extra_objects, self.build_dir)
self.write_meson_build(self.build_dir)
self.run_meson(self.build_dir)
self._move_exec_to_root(self.build_dir)
def _prepare_sources(mname, sources, bdir):
extended_sources = sources.copy()
Path(bdir).mkdir(parents=True, exist_ok=True)
# Copy sources
for source in sources:
if Path(source).exists() and Path(source).is_file():
shutil.copy(source, bdir)
generated_sources = [
Path(f"{mname}module.c"),
Path(f"{mname}-f2pywrappers2.f90"),
Path(f"{mname}-f2pywrappers.f"),
]
bdir = Path(bdir)
for generated_source in generated_sources:
if generated_source.exists():
shutil.copy(generated_source, bdir / generated_source.name)
extended_sources.append(generated_source.name)
generated_source.unlink()
extended_sources = [
Path(source).name
for source in extended_sources
if not Path(source).suffix == ".pyf"
]
return extended_sources
def _prepare_objects(mname, objects, bdir):
Path(bdir).mkdir(parents=True, exist_ok=True)
# Copy objects
for obj in objects:
if Path(obj).exists() and Path(obj).is_file():
shutil.copy(obj, bdir)
def _get_flags(fc_flags):
flag_values = []
flag_pattern = re.compile(r"--f(77|90)flags=(.*)")
for flag in fc_flags:
match_result = flag_pattern.match(flag)
if match_result:
values = match_result.group(2).strip().split()
values = [val.strip("'\"") for val in values]
flag_values.extend(values)
# Hacky way to preserve order of flags
unique_flags = list(dict.fromkeys(flag_values))
return unique_flags
| MesonBackend |
python | joblib__joblib | joblib/externals/loky/backend/synchronize.py | {
"start": 6808,
"end": 10910
} | class ____:
def __init__(self, lock=None):
self._lock = lock or RLock()
self._sleeping_count = Semaphore(0)
self._woken_count = Semaphore(0)
self._wait_semaphore = Semaphore(0)
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (
self._lock,
self._sleeping_count,
self._woken_count,
self._wait_semaphore,
)
def __setstate__(self, state):
(
self._lock,
self._sleeping_count,
self._woken_count,
self._wait_semaphore,
) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (
self._sleeping_count._semlock._get_value()
- self._woken_count._semlock._get_value()
)
except Exception:
num_waiters = "unknown"
return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"
def wait(self, timeout=None):
assert (
self._lock._semlock._is_mine()
), "must acquire() condition before using wait()"
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for _ in range(count):
self._lock.release()
try:
# wait for notification or timeout
return self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for _ in range(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), "lock is not owned"
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), "lock is not owned"
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for _ in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
#
# Event
#
| Condition |
python | redis__redis-py | redis/exceptions.py | {
"start": 3782,
"end": 4103
class ____(ResponseError):
    """Error indicating a -TRYAGAIN response was received from a cluster.

    Operations on keys that don't exist or are - during resharding - split
    between the source and destination nodes, will generate a -TRYAGAIN error.
    """

    def __init__(self, *args, **kwargs):
        # deliberately no-op: the class only exists to carry its type
        pass
| TryAgainError |
python | ray-project__ray | python/ray/data/_internal/execution/interfaces/op_runtime_metrics.py | {
"start": 4130,
"end": 4274
class ____:
    # Per-node aggregation of finished-task outputs:
    # counts of completed tasks plus byte/block totals of their outputs.
    num_tasks_finished: int = 0
    bytes_outputs_of_finished_tasks: int = 0
    blocks_outputs_of_finished_tasks: int = 0
| NodeMetrics |
python | python-poetry__poetry | tests/installation/test_installer.py | {
"start": 1958,
"end": 101253
class ____(BaseLocker):
    """In-memory test double for the real locker.

    Captures the data that would be written to poetry.lock (exposed via
    ``written_data``) instead of touching the filesystem, and lets tests
    toggle the locked/fresh state and inject mock lock data.
    """

    def __init__(self, lock_path: Path) -> None:
        self._lock = lock_path / "poetry.lock"
        self._written_data = None
        self._locked = False
        self._fresh = True
        self._lock_data = None
        self._content_hash = self._get_content_hash()

    @property
    def written_data(self) -> dict[str, Any]:
        # only meaningful after _write_lock_data() ran
        assert self._written_data is not None
        return self._written_data

    def set_lock_path(self, lock: Path) -> Locker:
        self._lock = lock / "poetry.lock"
        return self

    def locked(self, is_locked: bool = True) -> Locker:
        self._locked = is_locked
        return self

    def mock_lock_data(self, data: dict[str, Any]) -> None:
        self._lock_data = data

    def is_locked(self) -> bool:
        return self._locked

    def fresh(self, is_fresh: bool = True) -> Locker:
        self._fresh = is_fresh
        return self

    def is_fresh(self) -> bool:
        return self._fresh

    def _get_content_hash(self, *, with_dependency_groups: bool = True) -> str:
        # fixed hash so tests can assert against a known value
        return "123456789"

    def _write_lock_data(self, data: dict[str, Any]) -> None:
        # normalise python-versions to str, then deep-copy via a JSON
        # round-trip so later mutations don't leak into written_data
        for pkg in data["package"]:
            pkg["python-versions"] = str(pkg["python-versions"])
        self._written_data = json.loads(json.dumps(data))
        self._lock_data = data
@pytest.fixture(autouse=True, params=[False, True])
def config_installer_reresolve(
    config: Config, request: FixtureRequest
) -> Iterator[bool]:
    """Run every test twice: with installer re-resolve disabled and enabled."""
    config.config["installer"]["re-resolve"] = request.param
    yield request.param
@pytest.fixture()
def package() -> ProjectPackage:
    """Root project package ("root" 1.0) rooted at the current directory."""
    project = ProjectPackage("root", "1.0")
    project.root_dir = Path.cwd()
    return project
@pytest.fixture()
def repo() -> Repository:
    """Empty repository that individual tests populate with packages."""
    return Repository("repo")


@pytest.fixture()
def pool(repo: Repository) -> RepositoryPool:
    """Repository pool containing only the test repository."""
    pool = RepositoryPool()
    pool.add_repository(repo)
    return pool


@pytest.fixture()
def installed() -> CustomInstalledRepository:
    """Fake installed-packages repository; tests pre-seed it as needed."""
    return CustomInstalledRepository()


@pytest.fixture()
def locker(project_root: Path) -> Locker:
    """In-memory locker rooted at the test project directory."""
    return Locker(lock_path=project_root)


@pytest.fixture()
def env(tmp_path: Path) -> NullEnv:
    """Null environment pointing at a temporary directory."""
    return NullEnv(path=tmp_path)
@pytest.fixture()
def installer(
    package: ProjectPackage,
    pool: RepositoryPool,
    locker: Locker,
    env: NullEnv,
    installed: CustomInstalledRepository,
    config: Config,
) -> Installer:
    """Installer wired to the in-memory locker, test pool and test executor."""
    return Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
def fixture(name: str, data: dict[str, Any] | None = None) -> dict[str, Any]:
    """Load (and optionally first write) a TOML test fixture.

    Reads ``fixtures/<name>.test`` next to this module and returns its
    contents as a dictionary. When *data* is given it is written to the
    file beforehand, which is how new fixtures get generated.

    :param name: Fixture file name without the ``.test`` extension.
    :param data: Optional TOML-compatible dictionary to write first.
    :return: Parsed contents of the fixture file.
    """
    fixture_file = TOMLFile(Path(__file__).parent / "fixtures" / f"{name}.test")
    if data:
        # lock data is assumed compatible with TOMLDocument for our purposes
        fixture_file.write(cast("TOMLDocument", data))
    loaded: dict[str, Any] = fixture_file.read()
    return loaded
def fix_lock_data(lock_data: dict[str, Any]) -> None:
    """Upgrade 1.x-style mock lock data in place for lock-version >= 2.1.

    Newer lock formats carry per-package ``groups``/``files`` entries
    instead of a top-level ``metadata.files`` table.
    """
    if Version.parse(lock_data["metadata"]["lock-version"]) < Version.parse("2.1"):
        return
    for locked_package in lock_data["package"]:
        locked_package["groups"] = ["main"]
        locked_package["files"] = []
    del lock_data["metadata"]["files"]
def test_run_no_dependencies(installer: Installer, locker: Locker) -> None:
    """Installing an empty project succeeds and writes an empty lock file."""
    assert installer.run() == 0
    assert locker.written_data == fixture("no-dependencies")
def test_not_fresh_lock(installer: Installer, locker: Locker) -> None:
    """A stale lock file aborts the install with an actionable message."""
    locker.locked().fresh(False)
    message = re.escape(
        "pyproject.toml changed significantly since poetry.lock was last generated. "
        "Run `poetry lock` to fix the lock file."
    )
    with pytest.raises(ValueError, match=message):
        installer.run()
def test_run_with_dependencies(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Two simple requirements resolve and are written to the lock file."""
    for pkg in (get_package("A", "1.0"), get_package("B", "1.1")):
        repo.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-dependencies")
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_update_after_removing_dependencies(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """Dropping a requirement removes exactly the no-longer-needed package."""
    lock_data = {
        "package": [
            {
                "name": name,
                "version": version,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for name, version in (("A", "1.0"), ("B", "1.1"), ("C", "1.2"))
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": [], "C": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    for pkg in (
        get_package("A", "1.0"),
        get_package("B", "1.1"),
        get_package("C", "1.2"),
    ):
        repo.add_package(pkg)
        installed.add_package(pkg)

    # C is locked and installed but no longer required
    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "~1.1"))

    installer.update(True)
    assert installer.run() == 0

    assert locker.written_data == fixture("with-dependencies")
    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 1
def _configure_run_install_dev(
    lock_version: str,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    with_optional_group: bool = False,
    with_packages_installed: bool = False,
) -> None:
    """
    Perform common test setup for `test_run_install_*dev*()` methods.

    Locks A/B (main) and C (dev), optionally marks them installed, and
    wires the corresponding requirements onto *package*.
    """
    lock_data: dict[str, Any] = {
        "package": [
            {
                "name": name,
                "version": version,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for name, version in (("A", "1.0"), ("B", "1.1"), ("C", "1.2"))
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": [], "C": []},
        },
    }
    if lock_version == "2.1":
        # lock format 2.1 records per-package groups/files instead of a
        # top-level files table; "C" belongs to the dev group
        for locked_package in lock_data["package"]:
            locked_package["groups"] = [
                "dev" if locked_package["name"] == "C" else "main"
            ]
            locked_package["files"] = []
        del lock_data["metadata"]["files"]
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    packages = [
        get_package("A", "1.0"),
        get_package("B", "1.1"),
        get_package("C", "1.2"),
    ]
    for pkg in packages:
        repo.add_package(pkg)
    if with_packages_installed:
        for pkg in packages:
            installed.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "~1.1"))
    group = DependencyGroup("dev", optional=with_optional_group)
    group.add_dependency(Factory.create_dependency("C", "~1.2", groups=["dev"]))
    package.add_dependency_group(group)
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
@pytest.mark.parametrize("update", [False, True])
@pytest.mark.parametrize("requires_synchronization", [False, True])
@pytest.mark.parametrize(
    ("groups", "installs", "updates", "removals", "with_packages_installed"),
    [
        (None, 2, 0, 0, False),
        (None, 0, 0, 1, True),
        ([], 0, 0, 0, False),
        ([], 0, 0, 3, True),
        (["dev"], 1, 0, 0, False),
        (["dev"], 0, 0, 2, True),
        ([MAIN_GROUP], 2, 0, 0, False),
        ([MAIN_GROUP], 0, 0, 1, True),
        ([MAIN_GROUP, "dev"], 3, 0, 0, False),
        ([MAIN_GROUP, "dev"], 0, 0, 0, True),
    ],
)
def test_run_install_with_dependency_groups(
    groups: list[str] | None,
    installs: int,
    updates: int,
    removals: int,
    with_packages_installed: bool,
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    update: bool,
    requires_synchronization: bool,
    lock_version: str,
) -> None:
    """Group selection drives exactly which packages install or get removed."""
    _configure_run_install_dev(
        lock_version,
        locker,
        repo,
        package,
        installed,
        with_optional_group=True,
        with_packages_installed=with_packages_installed,
    )
    if groups is not None:
        installer.only_groups({canonicalize_name(g) for g in groups})
    installer.update(update)
    installer.requires_synchronization(requires_synchronization)

    assert installer.run() == 0

    # removals only happen when synchronization is requested
    expected_removals = removals if requires_synchronization else 0
    assert installer.executor.installations_count == installs
    assert installer.executor.updates_count == updates
    assert installer.executor.removals_count == expected_removals
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_install_does_not_remove_locked_packages_if_installed_but_not_required(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """Without --sync, locked-but-unrequired installed packages stay put."""
    packages = [
        get_package("a", "1.0"),
        get_package("b", "1.1"),
        get_package("c", "1.2"),
    ]
    for pkg in packages:
        repo.add_package(pkg)
        installed.add_package(pkg)
    installed.add_package(package)  # Root package never removed.

    package_a = packages[0]
    package.add_dependency(
        Factory.create_dependency(package_a.name, str(package_a.version))
    )

    lock_data = {
        "package": [
            {
                "name": pkg.name,
                "version": pkg.version.text,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for pkg in packages
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {pkg.name: [] for pkg in packages},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    assert installer.run() == 0
    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_install_removes_locked_packages_if_installed_and_synchronization_is_required(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
    config_installer_reresolve: bool,
) -> None:
    """With --sync, installed-and-locked packages that are no longer
    required (b, c) are removed; the root package itself never is."""
    packages = [
        get_package("a", "1.0"),
        get_package("b", "1.1"),
        get_package("c", "1.2"),
    ]
    for pkg in packages:
        repo.add_package(pkg)
        installed.add_package(pkg)
    installed.add_package(package)  # Root package never removed.

    package_a = packages[0]
    package.add_dependency(
        Factory.create_dependency(package_a.name, str(package_a.version))
    )

    lock_data = {
        "package": [
            {
                "name": pkg.name,
                "version": pkg.version.text,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for pkg in packages
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {pkg.name: [] for pkg in packages},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    installer.update(True)
    installer.requires_synchronization(True)
    installer.run()

    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 2
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_install_removes_no_longer_locked_packages_if_installed(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """An update run removes installed packages that drop out of the lock."""
    packages = [
        get_package("a", "1.0"),
        get_package("b", "1.1"),
        get_package("c", "1.2"),
    ]
    for pkg in packages:
        repo.add_package(pkg)
        installed.add_package(pkg)
    installed.add_package(package)  # Root package never removed.

    package_a = packages[0]
    package.add_dependency(
        Factory.create_dependency(package_a.name, str(package_a.version))
    )

    lock_data = {
        "package": [
            {
                "name": pkg.name,
                "version": pkg.version.text,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for pkg in packages
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {pkg.name: [] for pkg in packages},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    installer.update(True)
    assert installer.run() == 0

    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 2
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
@pytest.mark.parametrize(
    "managed_reserved_package_names",
    [(), ("pip",)],
)
def test_run_install_with_synchronization(
    managed_reserved_package_names: tuple[str, ...],
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """Synchronization removes everything installed but unlocked, and keeps
    managed reserved packages (pip) only when they are locked."""
    all_packages = [
        get_package("a", "1.0"),
        get_package("b", "1.1"),
        get_package("c", "1.2"),
        get_package("pip", "20.0.0"),
    ]
    managed_reserved_packages = [
        pkg for pkg in all_packages if pkg.name in managed_reserved_package_names
    ]
    package_a = all_packages[0]
    locked_packages = [package_a, *managed_reserved_packages]

    for pkg in all_packages:
        repo.add_package(pkg)
        installed.add_package(pkg)
    installed.add_package(package)  # Root package never removed.

    package.add_dependency(
        Factory.create_dependency(package_a.name, str(package_a.version))
    )

    lock_data = {
        "package": [
            {
                "name": pkg.name,
                # NOTE: version object (not .text) — mirrors the original data
                "version": pkg.version,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for pkg in locked_packages
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {pkg.name: [] for pkg in locked_packages},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    installer.update(True)
    installer.requires_synchronization(True)
    assert installer.run() == 0

    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 2 + len(managed_reserved_packages)

    expected_removals = {
        all_packages[1].name,
        all_packages[2].name,
        *managed_reserved_package_names,
    }
    assert isinstance(installer.executor, TestExecutor)
    assert {r.name for r in installer.executor.removals} == expected_removals
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_whitelist_add(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """Whitelisting B adds it while A stays pinned at its locked 1.0."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    # a newer A (1.1) is available but must NOT be picked up
    for pkg in (
        get_package("A", "1.0"),
        get_package("A", "1.1"),
        get_package("B", "1.1"),
    ):
        repo.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))

    installer.update(True)
    installer.whitelist(["B"])

    assert installer.run() == 0
    assert locker.written_data == fixture("with-dependencies")
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_whitelist_remove(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """Whitelisting a dropped dependency removes it and installs the rest."""
    lock_data = {
        "package": [
            {
                "name": name,
                "version": version,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
            for name, version in (("A", "1.0"), ("B", "1.1"))
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)

    package_b = get_package("B", "1.1")
    repo.add_package(get_package("A", "1.0"))
    repo.add_package(package_b)
    installed.add_package(package_b)

    # B is installed and locked, but no longer a requirement
    package.add_dependency(Factory.create_dependency("A", "~1.0"))

    installer.update(True)
    installer.whitelist(["B"])

    assert installer.run() == 0
    assert locker.written_data == fixture("remove")
    assert installer.executor.installations_count == 1
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 1
def test_add_with_sub_dependencies(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Transitive dependencies (A->D, B->C) end up in the lock file too."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    for pkg in (
        package_a,
        package_b,
        get_package("C", "1.2"),
        get_package("D", "1.3"),
    ):
        repo.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))
    package_a.add_dependency(Factory.create_dependency("D", "^1.0"))
    package_b.add_dependency(Factory.create_dependency("C", "~1.2"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-sub-dependencies")
def test_run_with_python_versions(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Only the C release compatible with the project's pythons is locked."""
    package.python_versions = "~2.7 || ^3.4"

    package_c12 = get_package("C", "1.2")
    package_c12.python_versions = "~2.7 || ^3.3"
    package_c13 = get_package("C", "1.3")
    package_c13.python_versions = "~3.3"  # incompatible with the project

    for pkg in (
        get_package("A", "1.0"),
        get_package("B", "1.1"),
        package_c12,
        package_c13,
    ):
        repo.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))
    package.add_dependency(Factory.create_dependency("C", "^1.0"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-python-versions")
def test_run_with_optional_and_python_restricted_dependencies(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Optional (extra) and python-gated requirements stay out of the install."""
    package.python_versions = "~2.7 || ^3.4"

    package_c13 = get_package("C", "1.3")
    package_c13.add_dependency(Factory.create_dependency("D", "^1.2"))
    for pkg in (
        get_package("A", "1.0"),
        get_package("B", "1.1"),
        get_package("C", "1.2"),
        package_c13,
        get_package("D", "1.4"),
    ):
        repo.add_package(pkg)

    package.extras = {canonicalize_name("foo"): [get_dependency("A", "~1.0")]}
    dep_a = Factory.create_dependency("A", {"version": "~1.0", "optional": True})
    dep_a._in_extras = [canonicalize_name("foo")]
    package.add_dependency(dep_a)
    package.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "python": "~2.4"})
    )
    package.add_dependency(
        Factory.create_dependency("C", {"version": "^1.0", "python": "~2.7 || ^3.4"})
    )

    assert installer.run() == 0
    assert locker.written_data == fixture("with-optional-dependencies")

    # We should only have 2 installs:
    # C,D since python version is not compatible
    # with B's python constraint and A is optional
    assert isinstance(installer.executor, TestExecutor)
    assert installer.executor.installations_count == 2
    assert installer.executor.installations[0].name == "d"
    assert installer.executor.installations[1].name == "c"
def test_run_with_optional_and_platform_restricted_dependencies(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    mocker: MockerFixture,
) -> None:
    """Like the python-restricted variant, but gated on sys.platform."""
    mocker.patch("sys.platform", "darwin")

    package_c13 = get_package("C", "1.3")
    package_c13.add_dependency(Factory.create_dependency("D", "^1.2"))
    for pkg in (
        get_package("A", "1.0"),
        get_package("B", "1.1"),
        get_package("C", "1.2"),
        package_c13,
        get_package("D", "1.4"),
    ):
        repo.add_package(pkg)

    package.extras = {canonicalize_name("foo"): [get_dependency("A", "~1.0")]}
    dep_a = Factory.create_dependency("A", {"version": "~1.0", "optional": True})
    dep_a._in_extras = [canonicalize_name("foo")]
    package.add_dependency(dep_a)
    package.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "platform": "custom"})
    )
    package.add_dependency(
        Factory.create_dependency("C", {"version": "^1.0", "platform": "darwin"})
    )

    assert installer.run() == 0
    assert locker.written_data == fixture("with-platform-dependencies")

    # Only C and D install: B's platform never matches and A is optional.
    assert isinstance(installer.executor, TestExecutor)
    assert installer.executor.installations_count == 2
    assert installer.executor.installations[0].name == "d"
    assert installer.executor.installations[1].name == "c"
def test_run_with_dependencies_extras(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Requiring B[foo] pulls in B's optional dependency C."""
    package_b = get_package("B", "1.0")
    package_b.extras = {canonicalize_name("foo"): [get_dependency("C", "^1.0")]}
    package_b.add_dependency(
        Factory.create_dependency("C", {"version": "^1.0", "optional": True})
    )
    for pkg in (get_package("A", "1.0"), package_b, get_package("C", "1.0")):
        repo.add_package(pkg)

    package.add_dependency(Factory.create_dependency("A", "^1.0"))
    package.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "extras": ["foo"]})
    )

    assert installer.run() == 0
    assert locker.written_data == fixture("with-dependencies-extras")
def test_run_with_dependencies_nested_extras(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Extras that enable extras (root -> A[B] -> B[C]) resolve recursively."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.0")
    package_c = get_package("C", "1.0")

    dependency_c = Factory.create_dependency(
        "C", {"version": "^1.0", "optional": True}
    )
    dependency_b = Factory.create_dependency(
        "B", {"version": "^1.0", "optional": True, "extras": ["C"]}
    )
    dependency_a = Factory.create_dependency("A", {"version": "^1.0", "extras": ["B"]})

    package_b.extras = {canonicalize_name("c"): [dependency_c]}
    package_b.add_dependency(dependency_c)
    package_a.add_dependency(dependency_b)
    package_a.extras = {canonicalize_name("b"): [dependency_b]}

    for pkg in (package_a, package_b, package_c):
        repo.add_package(pkg)
    package.add_dependency(dependency_a)

    assert installer.run() == 0
    assert locker.written_data == fixture("with-dependencies-nested-extras")
@pytest.mark.parametrize(
    "enabled_extras",
    [
        ([]),
        (["all"]),
        (["nested"]),
        (["install", "download"]),
        (["install"]),
        (["download"]),
    ],
)
@pytest.mark.parametrize("top_level_dependency", [True, False])
def test_solver_resolves_self_referential_extras(
    enabled_extras: list[str],
    top_level_dependency: bool,
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    create_package: PackageFactory,
) -> None:
    """Extras that reference the owning package itself (a[all] -> a[...])
    resolve correctly, both as a direct and as a transitive dependency."""
    dependency = (
        create_package(
            "A",
            str(package.version),
            extras={
                "download": ["download-package"],
                "install": ["install-package"],
                "py38": ["py38-package ; python_version == '3.8'"],
                "py310": ["py310-package ; python_version > '3.8'"],
                "all": ["a[download,install]"],
                "py": ["a[py38,py310]"],
                "nested": ["a[all]"],
            },
        )
        .to_dependency()
        .with_features(enabled_extras)
    )
    if not top_level_dependency:
        # bury the dependency one level down behind B
        dependency = create_package(
            "B", "1.0", dependencies=[dependency]
        ).to_dependency()
    package.add_dependency(dependency)

    assert installer.run() == 0

    fixture_name = "-".join(
        [
            "with-self-referencing-extras",
            *enabled_extras,
            "top" if top_level_dependency else "deep",
        ]
    )
    assert locker.written_data == fixture(fixture_name)
def test_solver_resolves_self_referential_extras_with_markers(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    create_package: PackageFactory,
) -> None:
    """Self-referencing extra guarded by a python_version marker."""
    package.add_dependency(
        Factory.create_dependency("A", {"version": "*", "extras": ["all"]})
    )
    create_package(
        "A",
        str(package.version),
        extras={
            "download": ["download-package"],
            "install": ["install-package"],
            "all": ["a[download,install] ; python_version < '3.9'"],
        },
    )

    assert installer.run() == 0

    fixture_name = "-".join(["with-self-referencing-extras", "b", "markers"])
    # FIXME: At the time of writing this test case, the markers from self-ref extras are not
    # correctly propagated into the dependency specs. For example, given this case,
    # the package "install-package" should have a final marker of
    # "extra == 'install' or extra == 'all' and python_version < '3.9'".
    assert locker.written_data == fixture(fixture_name)
@pytest.mark.parametrize("root", [True, False])
@pytest.mark.parametrize("locked", [False, True])
@pytest.mark.parametrize("extra", [None, "extra-one", "extra-two"])
def test_run_with_conflicting_dependency_extras(
    installer: Installer,
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    repo: Repository,
    config: Config,
    package: ProjectPackage,
    extra: str | None,
    locked: bool,
    root: bool,
) -> None:
    """
    - https://github.com/python-poetry/poetry/issues/6419

    Tests resolution of extras with conflicting dependencies. Tests in both as direct dependencies of
    root package and as transitive dependencies.
    """
    # A package with two optional dependencies, one for each extra
    # If root, this is the root package, otherwise an intermediate package
    main_package = package if root else get_package("intermediate-dep", "1.0.0")

    # Two conflicting versions of a dependency, one in each extra
    # (mutually exclusive markers guarantee only one can be selected)
    conflicting_dep_one_pkg = get_package("conflicting-dep", "1.1.0")
    conflicting_dep_two_pkg = get_package("conflicting-dep", "1.2.0")
    conflicting_dep_one = Factory.create_dependency(
        "conflicting-dep",
        {
            "version": "1.1.0",
            "markers": "extra == 'extra-one' and extra != 'extra-two'",
            "optional": True,
        },
    )
    conflicting_dep_two = Factory.create_dependency(
        "conflicting-dep",
        {
            "version": "1.2.0",
            "markers": "extra != 'extra-one' and extra == 'extra-two'",
            "optional": True,
        },
    )
    # Include both just for extra validation that our marker validation works as expected
    main_package.extras = {
        canonicalize_name("extra-one"): [conflicting_dep_one, conflicting_dep_two],
        canonicalize_name("extra-two"): [conflicting_dep_one, conflicting_dep_two],
    }
    main_package.add_dependency(conflicting_dep_one)
    main_package.add_dependency(conflicting_dep_two)
    repo.add_package(conflicting_dep_one_pkg)
    repo.add_package(conflicting_dep_two_pkg)
    if not root:
        repo.add_package(main_package)

    # If we have an intermediate package, add extras to our root package
    # (root-extra-* forwards to the intermediate package's extra-*)
    if not root:
        extra_one_dep = Factory.create_dependency(
            "intermediate-dep",
            {
                "version": "1.0.0",
                "markers": "extra == 'root-extra-one' and extra != 'root-extra-two'",
                "extras": ["extra-one"],
                "optional": True,
            },
        )
        extra_two_dep = Factory.create_dependency(
            "intermediate-dep",
            {
                "version": "1.0.0",
                "markers": "extra != 'root-extra-one' and extra == 'root-extra-two'",
                "extras": ["extra-two"],
                "optional": True,
            },
        )
        package.add_dependency(extra_one_dep)
        package.add_dependency(extra_two_dep)
        # Include both just for extra validation that our marker validation works as expected
        package.extras = {
            canonicalize_name("root-extra-one"): [extra_one_dep, extra_two_dep],
            canonicalize_name("root-extra-two"): [extra_one_dep, extra_two_dep],
        }

    fixture_name = "with-conflicting-dependency-extras-" + (
        "root" if root else "transitive"
    )
    locker.locked(locked)
    if locked:
        locker.mock_lock_data(dict(fixture(fixture_name)))

    if extra is not None:
        extras = [f"root-{extra}"] if not root else [extra]
        installer.extras(extras)
    result = installer.run()
    assert result == 0
    if not locked:
        expected = fixture(fixture_name)
        assert locker.written_data == expected

    # Results of installation are consistent with the 'extra' input
    assert isinstance(installer.executor, TestExecutor)

    expected_installations = []
    if extra == "extra-one":
        expected_installations.append(conflicting_dep_one_pkg)
    elif extra == "extra-two":
        expected_installations.append(conflicting_dep_two_pkg)
    if not root and extra is not None:
        expected_installations.append(get_package("intermediate-dep", "1.0.0"))

    assert len(installer.executor.installations) == len(expected_installations)
    assert set(installer.executor.installations) == set(expected_installations)
@pytest.mark.parametrize("locked", [True, False])
@pytest.mark.parametrize("extra", [None, "cpu", "cuda"])
def test_run_with_exclusive_extras_different_sources(
    installer: Installer,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    package: ProjectPackage,
    extra: str | None,
    locked: bool,
) -> None:
    """
    Mutually exclusive extras ('cpu' vs 'cuda') pin the same package name to
    two different sources; only the variant matching the requested extra may
    be installed, and locking must succeed both fresh and from an existing lock.

    - https://github.com/python-poetry/poetry/issues/6409
    - https://github.com/python-poetry/poetry/issues/6419
    - https://github.com/python-poetry/poetry/issues/7748
    - https://github.com/python-poetry/poetry/issues/9537
    """
    # Setup repo for each of our sources
    cpu_repo = Repository("pytorch-cpu")
    cuda_repo = Repository("pytorch-cuda")
    pool = RepositoryPool()
    pool.add_repository(cpu_repo)
    pool.add_repository(cuda_repo)
    config.config["repositories"] = {
        "pytorch-cpu": {"url": "https://download.pytorch.org/whl/cpu"},
        "pytorch-cuda": {"url": "https://download.pytorch.org/whl/cuda"},
    }
    # Configure packages that read from each of the different sources
    torch_cpu_pkg = get_package("torch", "1.11.0+cpu")
    torch_cpu_pkg._source_reference = "pytorch-cpu"
    torch_cpu_pkg._source_type = "legacy"
    torch_cpu_pkg._source_url = "https://download.pytorch.org/whl/cpu"
    torch_cuda_pkg = get_package("torch", "1.11.0+cuda")
    torch_cuda_pkg._source_reference = "pytorch-cuda"
    torch_cuda_pkg._source_type = "legacy"
    torch_cuda_pkg._source_url = "https://download.pytorch.org/whl/cuda"
    cpu_repo.add_package(torch_cpu_pkg)
    cuda_repo.add_package(torch_cuda_pkg)
    # Depend on each package based on exclusive extras
    torch_cpu_dep = Factory.create_dependency(
        "torch",
        {
            "version": "1.11.0+cpu",
            "markers": "extra == 'cpu' and extra != 'cuda'",
            "source": "pytorch-cpu",
        },
    )
    torch_cuda_dep = Factory.create_dependency(
        "torch",
        {
            "version": "1.11.0+cuda",
            "markers": "extra != 'cpu' and extra == 'cuda'",
            "source": "pytorch-cuda",
        },
    )
    package.add_dependency(torch_cpu_dep)
    package.add_dependency(torch_cuda_dep)
    # We don't want to cheat by only including the correct dependency in the 'extra' mapping
    package.extras = {
        canonicalize_name("cpu"): [torch_cpu_dep, torch_cuda_dep],
        canonicalize_name("cuda"): [torch_cpu_dep, torch_cuda_dep],
    }
    # Set locker state
    locker.locked(locked)
    if locked:
        locker.mock_lock_data(dict(fixture("with-exclusive-extras")))
    # Perform install
    # NOTE: a fresh Installer is built here (instead of using the `installer`
    # fixture) so that it resolves against the two-source pool set up above.
    installer = Installer(
        NullIO(),
        MockEnv(),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(),
            pool,
            config,
            NullIO(),
        ),
    )
    if extra is not None:
        installer.extras([extra])
    result = installer.run()
    assert result == 0
    # Results of locking are expected and installation are consistent with the 'extra' input
    if not locked:
        expected = fixture("with-exclusive-extras")
        assert locker.written_data == expected
    assert isinstance(installer.executor, TestExecutor)
    if extra is None:
        assert len(installer.executor.installations) == 0
    else:
        assert len(installer.executor.installations) == 1
        version = f"1.11.0+{extra}"
        source_url = f"https://download.pytorch.org/whl/{extra}"
        source_reference = f"pytorch-{extra}"
        assert installer.executor.installations[0] == Package(
            "torch",
            version,
            source_type="legacy",
            source_url=source_url,
            source_reference=source_reference,
        )
@pytest.mark.parametrize("locked", [True, False])
@pytest.mark.parametrize("extra", [None, "extra-one", "extra-two"])
def test_run_with_different_dependency_extras(
    installer: Installer,
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    repo: Repository,
    config: Config,
    package: ProjectPackage,
    extra: str | None,
    locked: bool,
) -> None:
    """
    - https://github.com/python-poetry/poetry/issues/834
    - https://github.com/python-poetry/poetry/issues/7748
    This tests different sets of extras in a dependency of the root project. These different dependency extras are
    themselves conditioned on extras in the root project.
    """
    # Three packages in addition to root: demo (direct dependency) and two transitive dep packages
    demo_pkg = get_package("demo", "1.0.0")
    transitive_one_pkg = get_package("transitive-dep-one", "1.1.0")
    transitive_two_pkg = get_package("transitive-dep-two", "1.2.0")
    # Switch each transitive dependency based on extra markers in the 'demo' package
    transitive_dep_one = Factory.create_dependency(
        "transitive-dep-one",
        {
            "version": "1.1.0",
            "markers": "extra == 'demo-extra-one' and extra != 'demo-extra-two'",
            "optional": True,
        },
    )
    transitive_dep_two = Factory.create_dependency(
        "transitive-dep-two",
        {
            "version": "1.2.0",
            "markers": "extra != 'demo-extra-one' and extra == 'demo-extra-two'",
            "optional": True,
        },
    )
    # Include both packages in both demo extras, to validate that they're filtered out based on extra markers alone
    demo_pkg.extras = {
        canonicalize_name("demo-extra-one"): [
            get_dependency("transitive-dep-one"),
            get_dependency("transitive-dep-two"),
        ],
        canonicalize_name("demo-extra-two"): [
            get_dependency("transitive-dep-one"),
            get_dependency("transitive-dep-two"),
        ],
    }
    demo_pkg.add_dependency(transitive_dep_one)
    demo_pkg.add_dependency(transitive_dep_two)
    # Now define the demo dependency, similarly switched on extra markers in the root package
    extra_one_dep = Factory.create_dependency(
        "demo",
        {
            "version": "1.0.0",
            "markers": "extra == 'extra-one' and extra != 'extra-two'",
            "extras": ["demo-extra-one"],
        },
    )
    extra_two_dep = Factory.create_dependency(
        "demo",
        {
            "version": "1.0.0",
            "markers": "extra != 'extra-one' and extra == 'extra-two'",
            "extras": ["demo-extra-two"],
        },
    )
    package.add_dependency(extra_one_dep)
    package.add_dependency(extra_two_dep)
    # Again we don't want to cheat by only including the correct dependency in the 'extra' mapping
    package.extras = {
        canonicalize_name("extra-one"): [extra_one_dep, extra_two_dep],
        canonicalize_name("extra-two"): [extra_one_dep, extra_two_dep],
    }
    repo.add_package(demo_pkg)
    repo.add_package(transitive_one_pkg)
    repo.add_package(transitive_two_pkg)
    locker.locked(locked)
    if locked:
        locker.mock_lock_data(dict(fixture("with-dependencies-differing-extras")))
    # Fresh Installer (instead of the `installer` fixture) so the MockEnv and
    # TestExecutor are wired consistently for this scenario.
    installer = Installer(
        NullIO(),
        MockEnv(),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(),
            pool,
            config,
            NullIO(),
        ),
    )
    if extra is not None:
        installer.extras([extra])
    result = installer.run()
    assert result == 0
    if not locked:
        expected = fixture("with-dependencies-differing-extras")
        assert locker.written_data == expected
    # Results of installation are consistent with the 'extra' input
    assert isinstance(installer.executor, TestExecutor)
    if extra is None:
        assert len(installer.executor.installations) == 0
    else:
        # demo plus exactly one of the two transitive dependencies
        assert len(installer.executor.installations) == 2
@pytest.mark.parametrize("is_locked", [False, True])
@pytest.mark.parametrize("is_installed", [False, True])
@pytest.mark.parametrize("with_extras", [False, True])
@pytest.mark.parametrize("do_update", [False, True])
@pytest.mark.parametrize("do_sync", [False, True])
def test_run_installs_extras_with_deps_if_requested(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    installed: CustomInstalledRepository,
    package: ProjectPackage,
    is_locked: bool,
    is_installed: bool,
    with_extras: bool,
    do_update: bool,
    do_sync: bool,
) -> None:
    """A requested extra (C) is installed together with its transitive dep (D);
    without the extra, a synchronizing plain install removes both again."""
    package.extras = {canonicalize_name("foo"): [get_dependency("C")]}
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.0")
    package_c = get_package("C", "1.0")
    package_d = get_package("D", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    repo.add_package(package_d)
    package.add_dependency(Factory.create_dependency("A", "^1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))
    dep_c = Factory.create_dependency("C", {"version": "^1.0", "optional": True})
    dep_c._in_extras = [canonicalize_name("foo")]
    package.add_dependency(dep_c)
    # D is only reachable through the optional extra package C.
    package_c.add_dependency(Factory.create_dependency("D", "^1.0"))
    if is_locked:
        locker.locked(True)
        locker.mock_lock_data(fixture("extras-with-dependencies"))
    if is_installed:
        installed.add_package(package_a)
        installed.add_package(package_b)
        installed.add_package(package_c)
        installed.add_package(package_d)
    if with_extras:
        installer.extras(["foo"])
    installer.update(do_update)
    installer.requires_synchronization(do_sync)
    result = installer.run()
    assert result == 0
    if not is_locked:
        assert locker.written_data == fixture("extras-with-dependencies")
    if with_extras:
        # A, B, C, D
        expected_installations_count = 0 if is_installed else 4
        expected_removals_count = 0
    else:
        # A, B
        expected_installations_count = 0 if is_installed else 2
        # We only want to uninstall extras if we do a "poetry install" without extras,
        # not if we do a "poetry update" or "poetry add".
        expected_removals_count = 2 if is_installed and do_sync else 0
    assert installer.executor.installations_count == expected_installations_count
    assert installer.executor.removals_count == expected_removals_count
def test_installer_with_pypi_repository(
    package: ProjectPackage,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    env: NullEnv,
    pypi_repository: PyPiRepository,
) -> None:
    """Resolving a dev-group dependency against the PyPI test repository
    writes the expected lock data."""
    repo_pool = RepositoryPool()
    repo_pool.add_repository(pypi_repository)
    pypi_installer = Installer(
        NullIO(), env, package, locker, repo_pool, config, installed=installed
    )

    package.python_versions = ">=3.7"
    package.add_dependency(Factory.create_dependency("pytest", "^3.5", groups=["dev"]))

    assert pypi_installer.run() == 0
    assert locker.written_data == fixture("with-pypi-repository")
def test_run_installs_with_local_file(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    fixture_dir: FixtureDirGetter,
) -> None:
    """A file (wheel) dependency is locked and installed with its dependency."""
    project_root = Path(__file__).parent.parent.parent
    package.root_dir = project_root
    locker.set_lock_path(project_root)

    wheel = fixture_dir("distributions/demo-0.1.0-py2.py3-none-any.whl")
    file_dependency = Factory.create_dependency(
        "demo",
        {"file": str(wheel.relative_to(project_root))},
        root_dir=project_root,
    )
    package.add_dependency(file_dependency)
    repo.add_package(get_package("pendulum", "1.4.4"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-file-dependency")
    # demo itself plus its transitive dependency pendulum
    assert installer.executor.installations_count == 2
def test_run_installs_wheel_with_no_requires_dist(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    fixture_dir: FixtureDirGetter,
) -> None:
    """A wheel whose metadata lacks Requires-Dist still locks and installs."""
    project_root = Path(__file__).parent.parent.parent
    package.root_dir = project_root
    locker.set_lock_path(project_root)

    wheel = fixture_dir(
        "wheel_with_no_requires_dist/demo-0.1.0-py2.py3-none-any.whl"
    )
    package.add_dependency(
        Factory.create_dependency(
            "demo",
            {"file": str(wheel.relative_to(project_root))},
            root_dir=project_root,
        )
    )

    assert installer.run() == 0
    assert locker.written_data == fixture("with-wheel-dependency-no-requires-dist")
    assert installer.executor.installations_count == 1
def test_run_installs_with_local_poetry_directory_and_extras(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    tmpdir: Path,
    fixture_dir: FixtureDirGetter,
) -> None:
    """A poetry path dependency requested with an extra installs the extra's dep."""
    project_root = Path(__file__).parent.parent.parent
    package.root_dir = project_root
    locker.set_lock_path(project_root)

    extras_project = fixture_dir("project_with_extras")
    path_dependency = Factory.create_dependency(
        "project-with-extras",
        {
            "path": str(extras_project.relative_to(project_root)),
            "extras": ["extras_a"],
        },
        root_dir=project_root,
    )
    package.add_dependency(path_dependency)
    repo.add_package(get_package("pendulum", "1.4.4"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-directory-dependency-poetry")
    assert installer.executor.installations_count == 2
@pytest.mark.parametrize("skip_directory", [True, False])
def test_run_installs_with_local_poetry_directory_and_skip_directory_flag(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    fixture_dir: FixtureDirGetter,
    skip_directory: bool,
) -> None:
    """When we set Installer.skip_directory(True) no path dependencies should
    be installed (including transitive dependencies).
    """
    root_dir = fixture_dir("directory")
    package.root_dir = root_dir
    locker.set_lock_path(root_dir)
    directory = root_dir.joinpath("project_with_transitive_directory_dependencies")
    package.add_dependency(
        Factory.create_dependency(
            "project-with-transitive-directory-dependencies",
            {"path": str(directory.relative_to(root_dir))},
            root_dir=root_dir,
        )
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("cachy", "0.2.0"))
    installer.skip_directory(skip_directory)
    result = installer.run()
    assert result == 0
    # Locking is unaffected by the skip flag; only installation differs.
    expected = fixture("with-directory-dependency-poetry-transitive")
    assert locker.written_data == expected
    assert isinstance(installer.executor, TestExecutor)
    # Path dependencies are identified by their "directory" source type.
    directory_installs = [
        p.name for p in installer.executor.installations if p.source_type == "directory"
    ]
    if skip_directory:
        # Only the two registry packages (pendulum, cachy) are installed.
        assert not directory_installs, directory_installs
        assert installer.executor.installations_count == 2
    else:
        assert directory_installs, directory_installs
        assert installer.executor.installations_count == 6
def test_run_installs_with_local_poetry_file_transitive(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    tmpdir: str,
    fixture_dir: FixtureDirGetter,
) -> None:
    """A path dependency whose project has transitive *file* dependencies is
    locked and installed together with those file dependencies."""
    root_dir = fixture_dir("directory")
    package.root_dir = root_dir
    locker.set_lock_path(root_dir)
    directory = fixture_dir("directory").joinpath(
        "project_with_transitive_file_dependencies"
    )
    package.add_dependency(
        Factory.create_dependency(
            "project-with-transitive-file-dependencies",
            {"path": str(directory.relative_to(root_dir))},
            root_dir=root_dir,
        )
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("cachy", "0.2.0"))
    result = installer.run()
    assert result == 0
    expected = fixture("with-file-dependency-transitive")
    assert locker.written_data == expected
    assert installer.executor.installations_count == 4
def test_run_installs_with_local_setuptools_directory(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    tmp_path: Path,
    fixture_dir: FixtureDirGetter,
) -> None:
    """A setuptools-based path dependency is locked and installed with its deps."""
    project_root = tmp_path / "root"
    package.root_dir = project_root
    locker.set_lock_path(project_root)

    copied_project = shutil.copytree(
        fixture_dir("project_with_setup"), project_root / "project"
    )
    package.add_dependency(
        Factory.create_dependency(
            "project-with-setup",
            {"path": str(copied_project.relative_to(project_root))},
            root_dir=project_root,
        )
    )
    for name, version in (("pendulum", "1.4.4"), ("cachy", "0.2.0")):
        repo.add_package(get_package(name, version))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-directory-dependency-setuptools")
    assert installer.executor.installations_count == 3
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_with_prereleases(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """A locked prerelease of A (allow-prereleases) is kept through an update
    that whitelists only B."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0a2",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package_a = get_package("A", "1.0a2")
    package_b = get_package("B", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_b)
    package.add_dependency(
        Factory.create_dependency("A", {"version": "*", "allow-prereleases": True})
    )
    package.add_dependency(Factory.create_dependency("B", "^1.1"))
    installer.update(True)
    # Only B is whitelisted for update; A stays at its locked prerelease.
    installer.whitelist({"B": "^1.1"})
    result = installer.run()
    assert result == 0
    expected = fixture("with-prereleases")
    assert locker.written_data == expected
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_update_all_with_lock(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """A full update (update(True), no whitelist) re-resolves and moves locked
    A 1.0 to the newer available 1.1."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": True,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            }
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package_a = get_package("A", "1.1")
    repo.add_package(get_package("A", "1.0"))
    repo.add_package(package_a)
    package.add_dependency(Factory.create_dependency("A", "*"))
    installer.update(True)
    result = installer.run()
    assert result == 0
    expected = fixture("update-with-lock")
    assert locker.written_data == expected
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_update_with_locked_extras(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """Updating only whitelisted D leaves A's locked extra dependency (B) and
    the python-restricted C as they were locked."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"B": "^1.0", "C": "^1.0"},
            },
            {
                "name": "B",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
            {
                "name": "C",
                "version": "1.1",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "requirements": {"python": "~2.7"},
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": [], "C": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package_a = get_package("A", "1.0")
    # B is only reachable through A's 'foo' extra; C only for python ~2.7.
    package_a.extras = {canonicalize_name("foo"): [get_dependency("B")]}
    b_dependency = get_dependency("B", "^1.0", optional=True)
    b_dependency._in_extras = [canonicalize_name("foo")]
    c_dependency = get_dependency("C", "^1.0")
    c_dependency.python_versions = "~2.7"
    package_a.add_dependency(b_dependency)
    package_a.add_dependency(c_dependency)
    repo.add_package(package_a)
    repo.add_package(get_package("B", "1.0"))
    repo.add_package(get_package("C", "1.1"))
    repo.add_package(get_package("D", "1.1"))
    package.add_dependency(
        Factory.create_dependency("A", {"version": "^1.0", "extras": ["foo"]})
    )
    package.add_dependency(Factory.create_dependency("D", "^1.0"))
    installer.update(True)
    installer.whitelist("D")
    result = installer.run()
    assert result == 0
    expected = fixture("update-with-locked-extras")
    assert locker.written_data == expected
def test_run_install_duplicate_dependencies_different_constraints(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """Duplicate deps split by a python marker: only the matching variant
    (B 1.0 -> C 1.2) is installed, in bottom-up order."""
    package.add_dependency(Factory.create_dependency("A", "*"))

    pkg_a = get_package("A", "1.0")
    pkg_a.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "python": "<4.0"})
    )
    pkg_a.add_dependency(
        Factory.create_dependency("B", {"version": "^2.0", "python": ">=4.0"})
    )

    pkg_b1 = get_package("B", "1.0")
    pkg_b2 = get_package("B", "2.0")
    pkg_b1.add_dependency(Factory.create_dependency("C", "1.2"))
    pkg_b2.add_dependency(Factory.create_dependency("C", "1.5"))

    pkg_c12 = get_package("C", "1.2")
    pkg_c15 = get_package("C", "1.5")

    for pkg in (pkg_a, pkg_b1, pkg_b2, pkg_c12, pkg_c15):
        repo.add_package(pkg)

    assert installer.run() == 0
    assert locker.written_data == fixture("with-duplicate-dependencies")

    assert isinstance(installer.executor, TestExecutor)
    installs = installer.executor.installations
    assert installer.executor.installations_count == 3
    # Installation order is dependency-first: C 1.2, then B 1.0, then A.
    assert installs[0] == pkg_c12
    assert installs[1] == pkg_b1
    assert installs[2] == pkg_a
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_install_duplicate_dependencies_different_constraints_with_lock(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """A lock containing marker-split duplicate dependencies round-trips: a
    full update re-writes the same lock and installs the matching variants."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {
                    "B": [
                        {"version": "^1.0", "python": "<4.0"},
                        {"version": "^2.0", "python": ">=4.0"},
                    ]
                },
            },
            {
                "name": "B",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"C": "1.2"},
                "requirements": {"python": "<4.0"},
            },
            {
                "name": "B",
                "version": "2.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"C": "1.5"},
                "requirements": {"python": ">=4.0"},
            },
            {
                "name": "C",
                "version": "1.2",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
            {
                "name": "C",
                "version": "1.5",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": [], "C": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package.add_dependency(Factory.create_dependency("A", "*"))
    package_a = get_package("A", "1.0")
    package_a.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "python": "<4.0"})
    )
    package_a.add_dependency(
        Factory.create_dependency("B", {"version": "^2.0", "python": ">=4.0"})
    )
    package_b10 = get_package("B", "1.0")
    package_b20 = get_package("B", "2.0")
    package_b10.add_dependency(Factory.create_dependency("C", "1.2"))
    package_b20.add_dependency(Factory.create_dependency("C", "1.5"))
    package_c12 = get_package("C", "1.2")
    package_c15 = get_package("C", "1.5")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    repo.add_package(package_b20)
    repo.add_package(package_c12)
    repo.add_package(package_c15)
    installer.update(True)
    result = installer.run()
    assert result == 0
    expected = fixture("with-duplicate-dependencies")
    assert locker.written_data == expected
    assert installer.executor.installations_count == 3
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_update_uninstalls_after_removal_transitive_dependency(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """An installed transitive dependency (B, only required for python <2.0)
    that drops out of the resolution is uninstalled on update."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"B": {"version": "^1.0", "python": "<2.0"}},
            },
            {
                "name": "B",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package.add_dependency(Factory.create_dependency("A", "*"))
    package_a = get_package("A", "1.0")
    package_a.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "python": "<2.0"})
    )
    package_b10 = get_package("B", "1.0")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    installed.add_package(get_package("A", "1.0"))
    installed.add_package(get_package("B", "1.0"))
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    # Exactly B is removed; A remains installed.
    assert installer.executor.removals_count == 1
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_run_install_duplicate_dependencies_different_constraints_with_lock_update(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    lock_version: str,
) -> None:
    """Whitelisted update of A to 1.1 (which requires B ^2.0 only) replaces the
    locked marker-split B entries with the single new resolution."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {
                    "B": [
                        {"version": "^1.0", "python": "<2.7"},
                        {"version": "^2.0", "python": ">=2.7"},
                    ]
                },
            },
            {
                "name": "B",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"C": "1.2"},
                "requirements": {"python": "<2.7"},
            },
            {
                "name": "B",
                "version": "2.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"C": "1.5"},
                "requirements": {"python": ">=2.7"},
            },
            {
                "name": "C",
                "version": "1.2",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
            {
                "name": "C",
                "version": "1.5",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": [], "C": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package.add_dependency(Factory.create_dependency("A", "*"))
    # The new A 1.1 depends on B ^2.0 unconditionally (no python split).
    package_a = get_package("A", "1.1")
    package_a.add_dependency(Factory.create_dependency("B", "^2.0"))
    package_b10 = get_package("B", "1.0")
    package_b20 = get_package("B", "2.0")
    package_b10.add_dependency(Factory.create_dependency("C", "1.2"))
    package_b20.add_dependency(Factory.create_dependency("C", "1.5"))
    package_c12 = get_package("C", "1.2")
    package_c15 = get_package("C", "1.5")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    repo.add_package(package_b20)
    repo.add_package(package_c12)
    repo.add_package(package_c15)
    installed.add_package(get_package("A", "1.0"))
    installer.update(True)
    installer.whitelist(["A"])
    result = installer.run()
    assert result == 0
    expected = fixture("with-duplicate-dependencies-update")
    assert locker.written_data == expected
    assert installer.executor.installations_count == 2
    assert installer.executor.updates_count == 1
    assert installer.executor.removals_count == 0
def test_installer_test_solver_finds_compatible_package_for_dependency_python_not_fully_compatible_with_package_python(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
) -> None:
    """The solver falls back to A 1.0.0 (python >=3.5) because the newer 1.0.1
    requires python >=3.6, which does not cover the dependency's ^3.5 marker."""
    package.python_versions = "~2.7 || ^3.4"
    package.add_dependency(
        Factory.create_dependency("A", {"version": "^1.0", "python": "^3.5"})
    )

    for version, supported_pythons in (("1.0.0", ">=3.5"), ("1.0.1", ">=3.6")):
        candidate = get_package("A", version)
        candidate.python_versions = supported_pythons
        repo.add_package(candidate)

    assert installer.run() == 0
    assert locker.written_data == fixture("with-conditional-dependency")
    assert installer.executor.installations_count == 1
def test_installer_required_extras_should_not_be_removed_when_updating_single_dependency(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    env: NullEnv,
    pool: RepositoryPool,
    config: Config,
) -> None:
    """C, pulled in via B's required 'foo' extra, must survive a later update
    that whitelists only the unrelated package D."""
    package.add_dependency(Factory.create_dependency("A", {"version": "^1.0"}))
    package_a = get_package("A", "1.0.0")
    package_a.add_dependency(
        Factory.create_dependency("B", {"version": "^1.0", "extras": ["foo"]})
    )
    package_b = get_package("B", "1.0.0")
    package_b.add_dependency(
        Factory.create_dependency("C", {"version": "^1.0", "optional": True})
    )
    package_b.extras = {canonicalize_name("foo"): [get_dependency("C")]}
    package_c = get_package("C", "1.0.0")
    package_d = get_package("D", "1.0.0")
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    repo.add_package(package_d)
    # Phase 1: fresh solve installs A, B and the extra dependency C.
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert installer.executor.installations_count == 3
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
    # Phase 2: add D, re-lock from the data just written, mark A/B/C installed,
    # then update with only D whitelisted.
    package.add_dependency(Factory.create_dependency("D", "^1.0"))
    locker.locked(True)
    locker.mock_lock_data(locker.written_data)
    installed.add_package(package_a)
    installed.add_package(package_b)
    installed.add_package(package_c)
    installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
    installer.update(True)
    installer.whitelist(["D"])
    result = installer.run()
    assert result == 0
    # Only D is installed; C is neither updated nor removed.
    assert installer.executor.installations_count == 1
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
def test_installer_required_extras_should_not_be_removed_when_updating_single_dependency_pypi_repository(
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    env: NullEnv,
    mocker: MockerFixture,
    config: Config,
    pypi_repository: PyPiRepository,
) -> None:
    """A transitive required extra must not be removed when a later update
    whitelists a single unrelated package, resolved via the PyPI test repo."""
    # Pin the platform so environment-marker evaluation is deterministic.
    mocker.patch("sys.platform", "darwin")
    pool = RepositoryPool()
    pool.add_repository(pypi_repository)
    installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
    package.add_dependency(
        Factory.create_dependency(
            "with-transitive-extra-dependency", {"version": "^0.12"}
        )
    )
    # Phase 1: fresh solve installs the dependency chain (3 packages).
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert installer.executor.installations_count == 3
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
    # Phase 2: add pytest, re-lock from written data, mark everything that was
    # just installed as installed, then update only the whitelisted package.
    package.add_dependency(Factory.create_dependency("pytest", "^3.5"))
    locker.locked(True)
    locker.mock_lock_data(locker.written_data)
    assert isinstance(installer.executor, TestExecutor)
    for pkg in installer.executor.installations:
        installed.add_package(pkg)
    installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
    installer.update(True)
    installer.whitelist(["pytest"])
    result = installer.run()
    assert result == 0
    # pytest and its dependencies are installed; nothing is removed.
    assert installer.executor.installations_count == 7
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
def test_installer_required_extras_should_be_installed(
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    env: NullEnv,
    config: Config,
    pypi_repository: PyPiRepository,
) -> None:
    """A dependency requested with an extra installs the extra's packages both
    on a fresh solve and again when re-solving from the written lock file."""
    pool = RepositoryPool()
    pool.add_repository(pypi_repository)
    installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
    package.add_dependency(
        Factory.create_dependency(
            "with-extra-dependency", {"version": "^0.12", "extras": ["filecache"]}
        )
    )
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert installer.executor.installations_count == 2
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
    # Second pass: solve again from the lock data written by the first run.
    locker.locked(True)
    locker.mock_lock_data(locker.written_data)
    installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(env, pool, config, NullIO()),
    )
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert installer.executor.installations_count == 2
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_update_multiple_times_with_split_dependencies_is_idempotent(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    lock_version: str,
) -> None:
    """Repeated `update(True)` runs with python-marker-split dependencies must
    write the identical lock data every time."""
    lock_data = {
        "package": [
            {
                "name": "A",
                "version": "1.0",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"B": ">=1.0"},
            },
            {
                "name": "B",
                "version": "1.0.1",
                "optional": False,
                "platform": "*",
                "python-versions": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
                "checksum": [],
                "dependencies": {},
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
            "files": {"A": [], "B": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package.python_versions = "~2.7 || ^3.4"
    package.add_dependency(Factory.create_dependency("A", "^1.0"))
    a10 = get_package("A", "1.0")
    # A 1.1 splits its C dependency by python version (^1.0 vs ^2.0).
    a11 = get_package("A", "1.1")
    a11.add_dependency(Factory.create_dependency("B", ">=1.0.1"))
    a11.add_dependency(
        Factory.create_dependency("C", {"version": "^1.0", "python": "~2.7"})
    )
    a11.add_dependency(
        Factory.create_dependency("C", {"version": "^2.0", "python": "^3.4"})
    )
    b101 = get_package("B", "1.0.1")
    b110 = get_package("B", "1.1.0")
    repo.add_package(a10)
    repo.add_package(a11)
    repo.add_package(b101)
    repo.add_package(b110)
    repo.add_package(get_package("C", "1.0"))
    repo.add_package(get_package("C", "2.0"))
    expected = fixture("with-multiple-updates")
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert locker.written_data == expected
    # Second and third runs start from the lock written by the previous run
    # and must reproduce it exactly.
    locker.mock_lock_data(locker.written_data)
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert locker.written_data == expected
    locker.mock_lock_data(locker.written_data)
    installer.update(True)
    result = installer.run()
    assert result == 0
    assert locker.written_data == expected
def test_installer_can_install_dependencies_from_forced_source(
    locker: Locker,
    package: ProjectPackage,
    installed: CustomInstalledRepository,
    env: NullEnv,
    config: Config,
    legacy_repository: LegacyRepository,
    pypi_repository: PyPiRepository,
) -> None:
    """A dependency pinned to an explicit source repository installs from it."""
    package.python_versions = "^3.7"
    package.add_dependency(
        Factory.create_dependency("tomlkit", {"version": "^0.5", "source": "legacy"})
    )

    # Both repositories are available; "tomlkit" is forced onto "legacy".
    repository_pool = RepositoryPool()
    for repository in (legacy_repository, pypi_repository):
        repository_pool.add_repository(repository)

    forced_installer = Installer(
        NullIO(),
        env,
        package,
        locker,
        repository_pool,
        config,
        installed=installed,
        executor=TestExecutor(env, repository_pool, config, NullIO()),
    )
    forced_installer.update(True)

    exit_code = forced_installer.run()
    assert exit_code == 0

    assert forced_installer.executor.installations_count == 1
    assert forced_installer.executor.updates_count == 0
    assert forced_installer.executor.removals_count == 0
def test_run_installs_with_url_file(
    installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage
) -> None:
    """A direct-URL wheel dependency is locked and installed."""
    wheel_url = "https://files.pythonhosted.org/distributions/demo-0.1.0-py2.py3-none-any.whl"
    package.add_dependency(Factory.create_dependency("demo", {"url": wheel_url}))

    # Presumably the demo wheel requires pendulum; make it resolvable.
    repo.add_package(get_package("pendulum", "1.4.4"))

    exit_code = installer.run()
    assert exit_code == 0

    assert locker.written_data == fixture("with-url-dependency")
    # demo plus its transitive dependency.
    assert installer.executor.installations_count == 2
@pytest.mark.parametrize("env_platform", ["linux", "win32"])
def test_run_installs_with_same_version_url_files(
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    repo: Repository,
    package: ProjectPackage,
    env_platform: str,
) -> None:
    """Two URL dependencies on the same version, split by sys_platform
    markers, must install the artifact matching the running platform."""
    # Same demo 0.1.0 release as two artifacts: sdist (linux) / wheel (win32).
    urls = {
        "linux": "https://files.pythonhosted.org/distributions/demo-0.1.0.tar.gz",
        "win32": (
            "https://files.pythonhosted.org/distributions/demo-0.1.0-py2.py3-none-any.whl"
        ),
    }
    for platform, url in urls.items():
        package.add_dependency(
            Factory.create_dependency(
                "demo",
                {"url": url, "markers": f"sys_platform == '{platform}'"},
            )
        )
    repo.add_package(get_package("pendulum", "1.4.4"))
    installer = Installer(
        NullIO(),
        MockEnv(platform=env_platform),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(platform=env_platform),
            pool,
            config,
            NullIO(),
        ),
    )
    result = installer.run()
    assert result == 0
    expected = fixture("with-same-version-url-dependencies")
    assert locker.written_data == expected
    assert isinstance(installer.executor, TestExecutor)
    assert installer.executor.installations_count == 2
    # The installed demo must come from the URL whose marker matched.
    demo_package = next(p for p in installer.executor.installations if p.name == "demo")
    assert demo_package.source_url == urls[env_platform]
def test_installer_uses_prereleases_if_they_are_compatible(
    installer: Installer, locker: Locker, package: ProjectPackage, repo: Repository
) -> None:
    """A locked git pre-release satisfies a later `>=` constraint on it."""
    package.python_versions = "~2.7 || ^3.4"
    package.add_dependency(
        Factory.create_dependency(
            "prerelease", {"git": "https://github.com/demo/prerelease.git"}
        )
    )

    b_package = get_package("b", "2.0.0")
    b_package.add_dependency(Factory.create_dependency("prerelease", ">=0.19"))
    repo.add_package(b_package)

    assert installer.run() == 0

    # Re-run against the lock file just written, adding "b" which also
    # requires the pre-release package.
    locker.locked(True)
    locker.mock_lock_data(locker.written_data)
    package.add_dependency(Factory.create_dependency("b", "^2.0.0"))
    installer.whitelist(["b"])
    installer.update(True)

    assert installer.run() == 0
    assert installer.executor.installations_count == 2
def test_installer_does_not_write_lock_file_when_installation_fails(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    mocker: MockerFixture,
) -> None:
    """A failed execution must not leave a (partially) written lock file."""
    locker.locked(False)
    repo.add_package(get_package("A", "1.0"))
    package.add_dependency(Factory.create_dependency("A", "~1.0"))

    # Force the execution phase to report failure.
    mocker.patch("poetry.installation.installer.Installer._execute", return_value=1)

    exit_code = installer.run()
    assert exit_code == 1  # error

    # Nothing was locked and nothing was executed.
    assert locker._lock_data is None
    assert installer.executor.installations_count == 0
    assert installer.executor.updates_count == 0
    assert installer.executor.removals_count == 0
@pytest.mark.parametrize("quiet", [True, False])
def test_run_with_dependencies_quiet(
    installer: Installer,
    locker: Locker,
    repo: Repository,
    package: ProjectPackage,
    quiet: bool,
) -> None:
    """In quiet mode the run emits no output; in normal mode it emits some."""
    for available in (get_package("A", "1.0"), get_package("B", "1.1")):
        repo.add_package(available)

    installer._io = BufferedIO(Input())
    if quiet:
        installer._io.set_verbosity(Verbosity.QUIET)
    else:
        installer._io.set_verbosity(Verbosity.NORMAL)

    package.add_dependency(Factory.create_dependency("A", "~1.0"))
    package.add_dependency(Factory.create_dependency("B", "^1.0"))

    assert installer.run() == 0
    assert locker.written_data == fixture("with-dependencies")

    captured = installer._io.fetch_output()
    if quiet:
        assert captured == ""
    else:
        assert captured != ""
@pytest.mark.parametrize("lock_version", ("1.1", "2.1"))
def test_installer_should_use_the_locked_version_of_git_dependencies(
    installer: Installer,
    locker: Locker,
    package: ProjectPackage,
    repo: Repository,
    lock_version: str,
) -> None:
    """Installing (not updating) a git dependency uses the commit pinned in
    the lock file rather than re-resolving the branch head."""
    # Lock file pins demo 0.1.1 at resolved commit "123456" on branch master.
    lock_data = {
        "package": [
            {
                "name": "demo",
                "version": "0.1.1",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {"pendulum": ">=1.4.4"},
                "source": {
                    "type": "git",
                    "url": "https://github.com/demo/demo.git",
                    "reference": "master",
                    "resolved_reference": "123456",
                },
            },
            {
                "name": "pendulum",
                "version": "1.4.4",
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "checksum": [],
                "dependencies": {},
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "platform": "*",
            "content-hash": "123456789",
            "files": {"demo": [], "pendulum": []},
        },
    }
    fix_lock_data(lock_data)
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    package.add_dependency(
        Factory.create_dependency(
            "demo", {"git": "https://github.com/demo/demo.git", "branch": "master"}
        )
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    result = installer.run()
    assert result == 0
    assert isinstance(installer.executor, TestExecutor)
    demo_installation = next(
        package
        for package in installer.executor.installations
        if package.name == "demo"
    )
    # The locked resolved_reference wins over the branch head.
    assert demo_installation == Package(
        "demo",
        "0.1.1",
        source_type="git",
        source_url="https://github.com/demo/demo.git",
        source_reference="master",
        source_resolved_reference="123456",
    )
@pytest.mark.parametrize("is_locked", [False, True])
def test_installer_should_use_the_locked_version_of_git_dependencies_with_extras(
    installer: Installer,
    locker: Locker,
    package: ProjectPackage,
    repo: Repository,
    is_locked: bool,
) -> None:
    """A git dependency with extras resolves to the locked commit when a lock
    file exists, and to the default mock revision otherwise."""
    if is_locked:
        locker.locked(True)
        locker.mock_lock_data(fixture("with-vcs-dependency-with-extras"))
        expected_reference = "123456"
    else:
        expected_reference = MOCK_DEFAULT_GIT_REVISION
    package.add_dependency(
        Factory.create_dependency(
            "demo",
            {
                "git": "https://github.com/demo/demo.git",
                "branch": "master",
                "extras": ["foo"],
            },
        )
    )
    # Dependencies of demo (including the "foo" extra) must be resolvable.
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("cleo", "1.0.0"))
    result = installer.run()
    assert result == 0
    assert isinstance(installer.executor, TestExecutor)
    # demo plus its two (extra-activated) dependencies.
    assert len(installer.executor.installations) == 3
    demo_installation = next(
        package
        for package in installer.executor.installations
        if package.name == "demo"
    )
    assert demo_installation == Package(
        "demo",
        "0.1.2",
        source_type="git",
        source_url="https://github.com/demo/demo.git",
        source_reference="master",
        source_resolved_reference=expected_reference,
    )
@pytest.mark.parametrize("is_locked", [False, True])
def test_installer_should_use_the_locked_version_of_git_dependencies_without_reference(
    installer: Installer,
    locker: Locker,
    package: ProjectPackage,
    repo: Repository,
    is_locked: bool,
) -> None:
    """
    If there is no explicit reference (branch or tag or rev) in pyproject.toml,
    HEAD is used.
    """
    if is_locked:
        locker.locked(True)
        locker.mock_lock_data(fixture("with-vcs-dependency-without-ref"))
        expected_reference = "123456"
    else:
        expected_reference = MOCK_DEFAULT_GIT_REVISION
    # No branch/tag/rev given — only the repository URL.
    package.add_dependency(
        Factory.create_dependency("demo", {"git": "https://github.com/demo/demo.git"})
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    result = installer.run()
    assert result == 0
    assert isinstance(installer.executor, TestExecutor)
    assert len(installer.executor.installations) == 2
    demo_installation = next(
        package
        for package in installer.executor.installations
        if package.name == "demo"
    )
    # source_reference falls back to "HEAD" when no reference was given.
    assert demo_installation == Package(
        "demo",
        "0.1.2",
        source_type="git",
        source_url="https://github.com/demo/demo.git",
        source_reference="HEAD",
        source_resolved_reference=expected_reference,
    )
@pytest.mark.parametrize("lock_version", ("2.0", "2.1"))
@pytest.mark.parametrize("env_platform", ["darwin", "linux"])
def test_installer_distinguishes_locked_packages_with_local_version_by_source(
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    repo: Repository,
    package: ProjectPackage,
    env_platform: str,
    lock_version: str,
) -> None:
    """https://github.com/python-poetry/poetry/issues/6710

    Two locked entries for "torch" differ only by local version tag and
    source; the installer must pick the one matching the platform marker.
    """
    # Require 1.11.0+cpu from pytorch for most platforms, but specify 1.11.0 and pypi on
    # darwin.
    package.add_dependency(
        Factory.create_dependency(
            "torch",
            {
                "version": "1.11.0+cpu",
                "markers": "sys_platform != 'darwin'",
                "source": "pytorch",
            },
        )
    )
    package.add_dependency(
        Factory.create_dependency(
            "torch",
            {
                "version": "1.11.0",
                "markers": "sys_platform == 'darwin'",
                "source": "pypi",
            },
        )
    )
    # Locking finds both the pypi and the pytorch packages.
    lock_data: dict[str, Any] = {
        "package": [
            {
                "name": "torch",
                "version": "1.11.0",
                "optional": False,
                "files": [],
                "python-versions": "*",
            },
            {
                "name": "torch",
                "version": "1.11.0+cpu",
                "optional": False,
                "files": [],
                "python-versions": "*",
                "source": {
                    "type": "legacy",
                    "url": "https://download.pytorch.org/whl",
                    "reference": "pytorch",
                },
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
        },
    }
    # Lock format 2.1 additionally records groups and markers per package.
    if lock_version == "2.1":
        lock_data["package"][0]["groups"] = ["main"]
        lock_data["package"][0]["markers"] = "sys_platform == 'darwin'"
        lock_data["package"][1]["groups"] = ["main"]
        lock_data["package"][1]["markers"] = "sys_platform != 'darwin'"
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    installer = Installer(
        NullIO(),
        MockEnv(platform=env_platform),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(platform=env_platform),
            pool,
            config,
            NullIO(),
        ),
    )
    result = installer.run()
    assert result == 0
    # Results of installation are consistent with the platform requirements.
    version = "1.11.0" if env_platform == "darwin" else "1.11.0+cpu"
    source_type = None if env_platform == "darwin" else "legacy"
    source_url = (
        None if env_platform == "darwin" else "https://download.pytorch.org/whl"
    )
    source_reference = None if env_platform == "darwin" else "pytorch"
    assert isinstance(installer.executor, TestExecutor)
    assert len(installer.executor.installations) == 1
    assert installer.executor.installations[0] == Package(
        "torch",
        version,
        source_type=source_type,
        source_url=source_url,
        source_reference=source_reference,
    )
@pytest.mark.parametrize("lock_version", ("2.0", "2.1"))
@pytest.mark.parametrize("env_platform_machine", ["aarch64", "amd64"])
def test_installer_distinguishes_locked_packages_with_same_version_by_source(
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    repo: Repository,
    package: ProjectPackage,
    env_platform_machine: str,
    lock_version: str,
) -> None:
    """https://github.com/python-poetry/poetry/issues/8303

    Two locked entries for "kivy" share the exact same version and differ
    only by source; the installer must pick the one whose platform_machine
    marker matches the environment.
    """
    package.add_dependency(
        Factory.create_dependency(
            "kivy",
            {
                "version": "2.2.1",
                "markers": "platform_machine == 'aarch64'",
                "source": "pywheels",
            },
        )
    )
    package.add_dependency(
        Factory.create_dependency(
            "kivy",
            {
                "version": "2.2.1",
                "markers": "platform_machine != 'aarch64'",
                "source": "PyPI",
            },
        )
    )
    # Locking finds both the pypi and the piwheels packages.
    lock_data: dict[str, Any] = {
        "package": [
            {
                "name": "kivy",
                "version": "2.2.1",
                "optional": False,
                "files": [],
                "python-versions": "*",
            },
            {
                "name": "kivy",
                "version": "2.2.1",
                "optional": False,
                "files": [],
                "python-versions": "*",
                "source": {
                    "type": "legacy",
                    "url": "https://www.piwheels.org/simple",
                    "reference": "pywheels",
                },
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
        },
    }
    # Lock format 2.1 additionally records groups and markers per package.
    if lock_version == "2.1":
        lock_data["package"][0]["groups"] = ["main"]
        lock_data["package"][0]["markers"] = "platform_machine != 'aarch64'"
        lock_data["package"][1]["groups"] = ["main"]
        lock_data["package"][1]["markers"] = "platform_machine == 'aarch64'"
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    installer = Installer(
        NullIO(),
        MockEnv(platform_machine=env_platform_machine),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(platform_machine=env_platform_machine),
            pool,
            config,
            NullIO(),
        ),
    )
    result = installer.run()
    assert result == 0
    # Results of installation are consistent with the platform requirements.
    version = "2.2.1"
    if env_platform_machine == "aarch64":
        source_type = "legacy"
        source_url = "https://www.piwheels.org/simple"
        source_reference = "pywheels"
    else:
        source_type = None
        source_url = None
        source_reference = None
    assert isinstance(installer.executor, TestExecutor)
    assert len(installer.executor.installations) == 1
    assert installer.executor.installations[0] == Package(
        "kivy",
        version,
        source_type=source_type,
        source_url=source_url,
        source_reference=source_reference,
    )
@pytest.mark.parametrize("lock_version", ("2.0", "2.1"))
@pytest.mark.parametrize("env_platform", ["darwin", "linux"])
def test_explicit_source_dependency_with_direct_origin_dependency(
    pool: RepositoryPool,
    locker: Locker,
    installed: CustomInstalledRepository,
    config: Config,
    repo: Repository,
    package: ProjectPackage,
    env_platform: str,
    lock_version: str,
) -> None:
    """
    A dependency with explicit source should not be satisfied by
    a direct origin dependency even if there is a version match.
    """
    demo_url = (
        "https://files.pythonhosted.org/distributions/demo-0.1.0-py2.py3-none-any.whl"
    )
    # Direct-origin (URL) variant for everything except darwin...
    package.add_dependency(
        Factory.create_dependency(
            "demo",
            {
                "markers": "sys_platform != 'darwin'",
                "url": demo_url,
            },
        )
    )
    # ...and an explicit-source variant (same version!) on darwin.
    package.add_dependency(
        Factory.create_dependency(
            "demo",
            {
                "version": "0.1.0",
                "markers": "sys_platform == 'darwin'",
                "source": "repo",
            },
        )
    )
    # The url demo dependency depends on pendulum.
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("demo", "0.1.0"))
    # Locking finds both the direct origin and the explicit source packages.
    lock_data: dict[str, Any] = {
        "package": [
            {
                "name": "demo",
                "version": "0.1.0",
                "optional": False,
                "files": [],
                "python-versions": "*",
                "dependencies": {"pendulum": ">=1.4.4"},
                "source": {
                    "type": "url",
                    "url": demo_url,
                },
            },
            {
                "name": "demo",
                "version": "0.1.0",
                "optional": False,
                "files": [],
                "python-versions": "*",
                "source": {
                    "type": "legacy",
                    "url": "https://www.demo.org/simple",
                    "reference": "repo",
                },
            },
            {
                "name": "pendulum",
                "version": "1.4.4",
                "optional": False,
                "files": [],
                "python-versions": "*",
            },
        ],
        "metadata": {
            "lock-version": lock_version,
            "python-versions": "*",
            "content-hash": "123456789",
        },
    }
    # Lock format 2.1 additionally records groups and markers per package.
    if lock_version == "2.1":
        for locked_package in lock_data["package"]:
            locked_package["groups"] = ["main"]
        lock_data["package"][0]["markers"] = "sys_platform != 'darwin'"
        lock_data["package"][1]["markers"] = "sys_platform == 'darwin'"
        lock_data["package"][2]["markers"] = "sys_platform != 'darwin'"
    locker.locked(True)
    locker.mock_lock_data(lock_data)
    installer = Installer(
        NullIO(),
        MockEnv(platform=env_platform),
        package,
        locker,
        pool,
        config,
        installed=installed,
        executor=TestExecutor(
            MockEnv(platform=env_platform),
            pool,
            config,
            NullIO(),
        ),
    )
    result = installer.run()
    assert result == 0
    assert isinstance(installer.executor, TestExecutor)
    if env_platform == "linux":
        # URL variant wins on linux and pulls in pendulum.
        assert set(installer.executor.installations) == {
            Package("pendulum", "1.4.4"),
            Package(
                "demo",
                "0.1.0",
                source_type="url",
                source_url=demo_url,
            ),
        }
    else:
        # Explicit-source variant wins on darwin; no pendulum needed.
        assert installer.executor.installations == [
            Package(
                "demo",
                "0.1.0",
                source_type="legacy",
                source_url="https://www.demo.org/simple",
                source_reference="repo",
            )
        ]
| Locker |
python | getsentry__sentry | src/sentry/relocation/tasks/process.py | {
"start": 41761,
"end": 70161
} | class ____:
"""
A task, along with a series of parameters to be passed to its `.apply_async` method, allowing
the task to be scheduled at some later point in the execution.
"""
task: Task
args: list[Any]
countdown: int | None = None
    def schedule(self) -> None:
        """
        Run the `.apply_async()` call defined by this future.
        """

        # NOTE(review): countdown=None presumably means "enqueue immediately"
        # for the underlying task framework — confirm against its API.
        self.task.apply_async(args=self.args, countdown=self.countdown)
def _update_relocation_validation_attempt(
    task: OrderedTask,
    relocation: Relocation,
    relocation_validation: RelocationValidation,
    relocation_validation_attempt: RelocationValidationAttempt,
    status: ValidationStatus,
) -> NextTask | None:
    """
    After a `RelocationValidationAttempt` resolves, make sure to update the owning
    `RelocationValidation` and `Relocation` as well.

    Returns the subsequent task that should be executed as soon as the wrapping
    `retry_task_or_fail_relocation` exits, as the last action in the currently running task.
    Returns None when no follow-up task should be scheduled (terminal failure paths).
    """

    # All three models are updated together, atomically.
    with atomic_transaction(
        using=(
            router.db_for_write(Relocation),
            router.db_for_write(RelocationValidation),
            router.db_for_write(RelocationValidationAttempt),
        )
    ):
        uuid_str = str(relocation.uuid)

        # If no interesting status updates occurred, check again in a minute.
        if status == ValidationStatus.IN_PROGRESS:
            logger.info(
                "Validation polling: scheduled",
                extra={"uuid": uuid_str, "task": task.name},
            )
            return NextTask(
                task=validating_poll,
                args=[uuid_str, str(relocation_validation_attempt.build_id)],
                countdown=60,
            )

        relocation_validation_attempt.status = status.value

        # These statuses merit failing this attempt and kicking off a new
        # `RelocationValidationAttempt`, if possible.
        if status in {ValidationStatus.TIMEOUT, ValidationStatus.FAILURE}:
            if relocation_validation.attempts < MAX_VALIDATION_POLL_ATTEMPTS:
                relocation_validation_attempt.status = status.value
                relocation_validation_attempt.save()

                # Go back to `validating_start`; since this is a new attempt at that task, we reset
                # the `latest_task_attempts` counter to 0.
                relocation.latest_task = OrderedTask.VALIDATING_START.name
                relocation.latest_task_attempts = 0
                relocation.save()

                logger.info(
                    "Validation timed out",
                    extra={"uuid": uuid_str, "task": task.name},
                )
                return NextTask(task=validating_start, args=[uuid_str])

            # Always accept the numerically higher `ValidationStatus`, since that is a more definite
            # result.
            if relocation_validation.status < status.value:
                relocation_validation.status = status.value
            relocation_validation_attempt.save()

            # Fail only after the transaction has committed successfully.
            transaction.on_commit(
                lambda: fail_relocation(
                    relocation, task, "Validation could not be completed. Please contact support."
                ),
                using=router.db_for_write(Relocation),
            )
            return None

        # All remaining statuses are final, so we can update the owning `RelocationValidation` now.
        assert status in {ValidationStatus.INVALID, ValidationStatus.VALID}
        relocation_validation_attempt.status = status.value
        relocation_validation_attempt.save()
        relocation_validation.status = status.value
        relocation_validation.save()

        # If we've reached a definite status, resolve both the `RelocationValidation` and this
        # constituent `RelocationValidationAttempt`.
        if status == ValidationStatus.INVALID:
            logger.info(
                "Validation result: invalid",
                extra={"uuid": uuid_str, "task": task.name},
            )
            transaction.on_commit(
                lambda: fail_relocation(
                    relocation,
                    task,
                    "The data you provided failed validation. Please contact support.",
                ),
                using=router.db_for_write(Relocation),
            )
            return None

        # Valid: advance the relocation to the importing step.
        assert status == ValidationStatus.VALID
        relocation.step = Relocation.Step.IMPORTING.value
        relocation.save()

        logger.info(
            "Validation result: valid",
            extra={"uuid": uuid_str, "task": task.name},
        )
        return NextTask(task=importing, args=[uuid_str])
@instrumented_task(
    name="sentry.relocation.validating_start",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def validating_start(uuid: str) -> None:
    """
    Calls into Google CloudBuild and kicks off a validation run.

    This function is meant to be idempotent, and should be retried with an exponential backoff.
    """

    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.VALIDATING_START,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return

    relocation_validation = _get_relocation_validation(relocation, OrderedTask.VALIDATING_START)
    if relocation_validation is None:
        return
    # Give up entirely once the maximum number of full validation runs is spent.
    if relocation_validation.attempts >= MAX_VALIDATION_RUNS:
        fail_relocation(relocation, OrderedTask.VALIDATING_START, ERR_VALIDATING_MAX_RUNS)
        return

    with retry_task_or_fail_relocation(
        relocation, OrderedTask.VALIDATING_START, attempts_left, ERR_VALIDATING_INTERNAL
    ):
        cb_client = CloudBuildClient()

        # Like `camel_to_snake_case`, but preserves any trailing underscores —
        # needed for keys such as "object_" below.
        def camel_to_snake_keep_underscores(value):
            match = re.search(r"(_++)$", value)
            converted = camel_to_snake_case(value)
            return converted + (match.group(0) if match else "")

        # The build config is rendered as YAML, then converted to the
        # snake_case keyword shape the CloudBuild API client expects.
        cb_yaml = create_cloudbuild_yaml(relocation)
        cb_conf = yaml.safe_load(cb_yaml)
        build = Build(
            source={
                "storage_source": {
                    "bucket": get_relocations_bucket_name(),
                    "object_": f"runs/{uuid}/conf/cloudbuild.zip",
                }
            },
            steps=convert_dict_key_case(cb_conf["steps"], camel_to_snake_keep_underscores),
            artifacts=convert_dict_key_case(cb_conf["artifacts"], camel_to_snake_keep_underscores),
            timeout=convert_dict_key_case(cb_conf["timeout"], camel_to_snake_keep_underscores),
            options=convert_dict_key_case(cb_conf["options"], camel_to_snake_keep_underscores),
            tags=[
                f"relocation-into-{get_local_region().name}",
                f"relocation-id-{uuid}",
            ],
        )
        response = cb_client.create_build(project_id=gcp_project_id(), build=build)

        # Record the new attempt atomically before scheduling the poll.
        with atomic_transaction(
            using=(
                router.db_for_write(RelocationValidation),
                router.db_for_write(RelocationValidationAttempt),
            )
        ):
            relocation_validation.attempts += 1
            relocation_validation.save()
            RelocationValidationAttempt.objects.create(
                relocation=relocation,
                relocation_validation=relocation_validation,
                build_id=response.metadata.build.id,
            )

        validating_poll.apply_async(args=[uuid, str(response.metadata.build.id)])
@instrumented_task(
    name="sentry.relocation.validating_poll",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_VALIDATION_POLLS, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def validating_poll(uuid: str, build_id: str) -> None:
    """
    Checks the progress of a Google CloudBuild validation run.

    This function is meant to be idempotent, and should be retried with an exponential backoff.
    """

    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.VALIDATING_POLL,
        allowed_task_attempts=MAX_VALIDATION_POLL_ATTEMPTS,
    )
    if relocation is None:
        return

    relocation_validation = _get_relocation_validation(relocation, OrderedTask.VALIDATING_POLL)
    if relocation_validation is None:
        return
    relocation_validation_attempt = _get_relocation_validation_attempt(
        relocation, relocation_validation, build_id, OrderedTask.VALIDATING_POLL
    )
    if relocation_validation_attempt is None:
        return

    logger.info(
        "Validation polling: active",
        extra={
            "uuid": uuid,
            "task": OrderedTask.VALIDATING_POLL.name,
            "build_id": build_id,
        },
    )

    next_task = None
    with retry_task_or_fail_relocation(
        relocation, OrderedTask.VALIDATING_POLL, attempts_left, ERR_VALIDATING_INTERNAL
    ):
        cb_client = CloudBuildClient()
        build = cb_client.get_build(project_id=gcp_project_id(), id=str(build_id))
        # Treat a missing `date_added` as the epoch so the age comparison
        # below always resolves to "timed out".
        date_added = (
            relocation_validation_attempt.date_added
            if relocation_validation_attempt.date_added is not None
            else datetime.fromtimestamp(0)
        )
        timeout_limit = datetime.now(UTC) - DEFAULT_VALIDATION_TIMEOUT
        # Map CloudBuild build states onto our ValidationStatus transitions.
        if build.status == Build.Status.SUCCESS:
            next_task = NextTask(
                task=validating_complete,
                args=[uuid, str(build_id)],
            )
        elif build.status in {
            Build.Status.FAILURE,
            Build.Status.INTERNAL_ERROR,
            Build.Status.CANCELLED,
        }:
            next_task = _update_relocation_validation_attempt(
                OrderedTask.VALIDATING_POLL,
                relocation,
                relocation_validation,
                relocation_validation_attempt,
                ValidationStatus.FAILURE,
            )
        elif (
            build.status in {Build.Status.TIMEOUT, Build.Status.EXPIRED}
            or date_added < timeout_limit
        ):
            next_task = _update_relocation_validation_attempt(
                OrderedTask.VALIDATING_POLL,
                relocation,
                relocation_validation,
                relocation_validation_attempt,
                ValidationStatus.TIMEOUT,
            )
        else:
            next_task = _update_relocation_validation_attempt(
                OrderedTask.VALIDATING_POLL,
                relocation,
                relocation_validation,
                relocation_validation_attempt,
                ValidationStatus.IN_PROGRESS,
            )

    # Schedule the follow-up only after leaving the retry context.
    if next_task is not None:
        next_task.schedule()
@instrumented_task(
    name="sentry.relocation.validating_complete",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def validating_complete(uuid: str, build_id: str) -> None:
    """
    Wraps up a validation run, and reports on what we found. If this task is being called, the
    CloudBuild run as completed successfully, so we just need to figure out if there were any
    findings (failure) or not (success).

    This function is meant to be idempotent, and should be retried with an exponential backoff.
    """

    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.VALIDATING_COMPLETE,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return

    relocation_validation = _get_relocation_validation(relocation, OrderedTask.VALIDATING_COMPLETE)
    if relocation_validation is None:
        return
    relocation_validation_attempt = _get_relocation_validation_attempt(
        relocation, relocation_validation, build_id, OrderedTask.VALIDATING_COMPLETE
    )
    if relocation_validation_attempt is None:
        return

    next_task = None
    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.VALIDATING_COMPLETE,
        attempts_left,
        ERR_VALIDATING_INTERNAL,
    ):
        storage = get_relocation_storage()
        # Any non-empty findings file flips the result to INVALID.
        final_status = ValidationStatus.VALID
        (_, findings_files) = storage.listdir(f"runs/{uuid}/findings")
        for file in sorted(findings_files, reverse=True):
            # Ignore files prefixed with `artifacts-`, as these are generated by CloudBuild.
            if file.startswith("artifacts-"):
                continue
            findings_file = storage.open(f"runs/{uuid}/findings/{file}")
            with findings_file:
                findings = json.load(findings_file)
                if len(findings) > 0:
                    final_status = ValidationStatus.INVALID
                    break
        next_task = _update_relocation_validation_attempt(
            OrderedTask.VALIDATING_COMPLETE,
            relocation,
            relocation_validation,
            relocation_validation_attempt,
            final_status,
        )

    # Schedule the follow-up only after leaving the retry context.
    if next_task is not None:
        next_task.schedule()
@instrumented_task(
    name="sentry.relocation.importing",
    namespace=relocation_tasks,
    processing_deadline_duration=SLOW_TIME_LIMIT,
    # At first blush, it would seem that retrying a failed import will leave a bunch of "abandoned"
    # data from the previous one, but that is not actually the case: because we use this relocation
    # UUID as the `import_uuid` for the `import_in...` call, we'll be able to re-use all of the
    # already-written import chunks (and, by extension, their models). This is due to each import
    # write operation atomically checking the relevant `ImportChunk` table for collisions at
    # database write time. So it will attempt to write a new copy, realize that this `(import_uuid,
    # model, ordinal)` three-tuple has already been written, and return that information instead.
    # Basically, all of the already completed write operations will be no-ops that return the
    # already-written models and pk maps, and we'll pick up right where we left off.
    #
    # The main reason to have this at all is to guard against transient errors, especially with RPC
    # or task timeouts.
    retry=Retry(times=MAX_SLOW_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def importing(uuid: str) -> None:
    """
    Perform the import on the actual live instance we are targeting.

    This function is NOT idempotent - if an import breaks, we should just abandon it rather than
    trying it again!
    """

    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.IMPORTING,
        allowed_task_attempts=MAX_SLOW_TASK_ATTEMPTS,
    )
    if relocation is None:
        return

    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.IMPORTING,
        attempts_left,
        ERR_IMPORTING_INTERNAL,
    ):
        # The `uploading_complete` task above should have verified that this is ready for use.
        raw_relocation_file = (
            RelocationFile.objects.filter(
                relocation=relocation,
                kind=RelocationFile.Kind.RAW_USER_DATA.value,
            )
            .select_related("file")
            .get()
        )
        relocation_data_fp = raw_relocation_file.file.getfile()
        log_gcp_credentials_details(logger)
        # KMS config is passed as an in-memory JSON blob for decryption.
        kms_config_fp = BytesIO(json.dumps(get_default_crypto_key_version()).encode("utf-8"))
        with relocation_data_fp, kms_config_fp:
            import_in_organization_scope(
                relocation_data_fp,
                decryptor=GCPKMSDecryptor(kms_config_fp),
                flags=ImportFlags(
                    import_uuid=str(uuid),
                    # Imported orgs stay hidden until `notifying_unhide` runs.
                    hide_organizations=True,
                    merge_users=relocation.provenance == Relocation.Provenance.SAAS_TO_SAAS,
                    overwrite_configs=False,
                ),
                org_filter=set(relocation.want_org_slugs),
                printer=LoggingPrinter(uuid),
            )

    postprocessing.apply_async(args=[uuid])
@instrumented_task(
    name="sentry.relocation.postprocessing",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def postprocessing(uuid: str) -> None:
    """
    Make the owner of this relocation an owner of all of the organizations we just imported.
    """

    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.POSTPROCESSING,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return

    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.POSTPROCESSING,
        attempts_left,
        ERR_POSTPROCESSING_INTERNAL,
    ):
        # Collect the primary keys of every organization this import wrote.
        imported_org_ids: set[int] = set()
        for chunk in RegionImportChunk.objects.filter(
            import_uuid=uuid, model="sentry.organization"
        ):
            imported_org_ids = imported_org_ids.union(set(chunk.inserted_map.values()))

        # Do a sanity check on pk-mapping before we go and make anyone the owner of an org they did
        # not import - are all of these orgs plausibly ones that the user requested, based on slug
        # matching?
        imported_orgs = Organization.objects.filter(id__in=imported_org_ids)
        for org in imported_orgs:
            matched_prefix = False
            for slug_prefix in relocation.want_org_slugs:
                if org.slug.startswith(slug_prefix):
                    matched_prefix = True
                    break

            # This should always be treated as an internal logic error, since we just wrote these
            # orgs, so probably there is a serious bug with pk mapping.
            assert matched_prefix is True

        # Okay, all of the new organizations specified by the import chunk seem kosher - go ahead
        # and make the owner of this import an owner of all of them.
        for org in imported_orgs:
            organization_service.add_organization_member(
                organization_id=org.id,
                default_org_role=org.default_role,
                user_id=relocation.owner_id,
                role="owner",
            )

        # Last, but certainly not least: trigger signals, so that interested subscribers in eg:
        # getsentry can do whatever postprocessing they need to. If even a single one fails, we fail
        # the entire task.
        for _, result in relocated.send_robust(sender=postprocessing, relocation_uuid=uuid):
            if isinstance(result, Exception):
                raise result

        # This signal must come after the relocated signal, to ensure that the subscription and
        # customer models have been appropriately set up before attempting to redeem a promo code.
        relocation_redeem_promo_code.send_robust(
            sender=postprocessing,
            user_id=relocation.owner_id,
            relocation_uuid=uuid,
            orgs=list(imported_orgs),
        )

        # Analytics failures are reported but never fail the relocation.
        for org in imported_orgs:
            try:
                analytics.record(
                    RelocationOrganizationImportedEvent(
                        organization_id=org.id,
                        relocation_uuid=uuid,
                        slug=org.slug,
                        owner_id=relocation.owner_id,
                    )
                )
            except Exception as e:
                capture_exception(e)

    notifying_unhide.apply_async(args=[uuid])
@instrumented_task(
    name="sentry.relocation.notifying_unhide",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def notifying_unhide(uuid: str) -> None:
    """
    Un-hide the just-imported organizations, making them visible to users in the UI.

    On success, schedules the next task in the pipeline (`notifying_users`).
    """
    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.NOTIFYING_UNHIDE,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return
    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.NOTIFYING_UNHIDE,
        attempts_left,
        ERR_NOTIFYING_INTERNAL,
    ):
        # Collect the ids of every organization written by this relocation's import.
        imported_org_ids: set[int] = set()
        for chunk in RegionImportChunk.objects.filter(
            # `uuid` is already normalized to a string at the top of this function; the
            # previous `str(uuid)` here was redundant and inconsistent with the sibling
            # notifying_* tasks.
            import_uuid=uuid, model="sentry.organization"
        ):
            imported_org_ids = imported_org_ids.union(set(chunk.inserted_map.values()))
        # Reveal all imported organizations to their users.
        with transaction.atomic(router.db_for_write(Organization)):
            imported_orgs = Organization.objects.filter(id__in=imported_org_ids)
            for org in imported_orgs:
                if org.status == OrganizationStatus.RELOCATION_PENDING_APPROVAL:
                    org.status = OrganizationStatus.ACTIVE
                    org.save()
    notifying_users.apply_async(args=[uuid])
@instrumented_task(
    name="sentry.relocation.notifying_users",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def notifying_users(uuid: str) -> None:
    """
    Send an email to all users that have been imported, telling them to claim their accounts.
    """
    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.NOTIFYING_USERS,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return
    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.NOTIFYING_USERS,
        attempts_left,
        ERR_NOTIFYING_INTERNAL,
    ):
        # Gather every user id inserted by this relocation's import.
        imported_user_ids: set[int] = set()
        for control_chunk in ControlImportChunkReplica.objects.filter(
            import_uuid=uuid, model="sentry.user"
        ):
            imported_user_ids |= set(control_chunk.inserted_map.values())
        # Gather the slug of every organization inserted by this relocation's import.
        imported_org_slugs: set[str] = set()
        for region_chunk in RegionImportChunk.objects.filter(
            import_uuid=uuid, model="sentry.organization"
        ):
            imported_org_slugs |= set(region_chunk.inserted_identifiers.values())
        # Do a sanity check on pk-mapping before we go and reset the passwords of random
        # users - are all of these usernames plausibly ones that were included in the
        # import, based on username prefix matching?
        imported_users = user_service.get_many(filter={"user_ids": list(imported_user_ids)})
        for user in imported_users:
            # A failure here should always be treated as an internal logic error, since we
            # just wrote these users, so probably there is a serious bug with pk mapping.
            assert any(
                user.username.startswith(username_prefix)
                for username_prefix in relocation.want_usernames or ()
            )
        # Okay, everything seems fine - go ahead and send those emails.
        for user in imported_users:
            # Sometimes, we merge users together before unpausing a relocation. No need to
            # send an email to these users!
            if not user.is_unclaimed:
                continue
            hash = lost_password_hash_service.get_or_create(user_id=user.id).hash
            LostPasswordHash.send_relocate_account_email(user, hash, list(imported_org_slugs))
        relocation.latest_unclaimed_emails_sent_at = datetime.now(UTC)
        relocation.save()
    notifying_owner.apply_async(args=[uuid])
@instrumented_task(
    name="sentry.relocation.notifying_owner",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def notifying_owner(uuid: str) -> None:
    """
    Send an email to the creator and owner, telling them that their relocation was successful.

    On success, schedules the final task in the pipeline (`completed`).
    """
    uuid = str(uuid)
    (relocation, attempts_left) = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.NOTIFYING_OWNER,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return
    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.NOTIFYING_OWNER,
        attempts_left,
        ERR_NOTIFYING_INTERNAL,
    ):
        # `inserted_identifiers` maps pks to org slugs, so these are strings (the previous
        # `set[int]` annotation was incorrect; compare `notifying_users`, which annotates
        # the same expression as `set[str]`).
        imported_org_slugs: set[str] = set()
        for chunk in RegionImportChunk.objects.filter(
            import_uuid=uuid, model="sentry.organization"
        ):
            imported_org_slugs = imported_org_slugs.union(set(chunk.inserted_identifiers.values()))
        send_relocation_update_email(
            relocation,
            Relocation.EmailKind.SUCCEEDED,
            {
                "uuid": uuid,
                "orgs": list(imported_org_slugs),
            },
        )
    completed.apply_async(args=[uuid])
@instrumented_task(
    name="sentry.relocation.completed",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    retry=Retry(times=MAX_FAST_TASK_RETRIES, on=(Exception,), times_exceeded=LastAction.Discard),
    silo_mode=SiloMode.REGION,
)
def completed(uuid: str) -> None:
    """
    Finish up a relocation by marking it a success.
    """
    uuid = str(uuid)
    relocation, attempts_left = start_relocation_task(
        uuid=uuid,
        task=OrderedTask.COMPLETED,
        allowed_task_attempts=MAX_FAST_TASK_ATTEMPTS,
    )
    if relocation is None:
        return
    with retry_task_or_fail_relocation(
        relocation,
        OrderedTask.COMPLETED,
        attempts_left,
        ERR_COMPLETED_INTERNAL,
    ):
        # Terminal state: record the relocation as having fully succeeded. No follow-up
        # task is scheduled; the pipeline ends here.
        relocation.status = Relocation.Status.SUCCESS.value
        relocation.save()
@instrumented_task(
    name="sentry.relocation.noop",
    namespace=relocation_tasks,
    processing_deadline_duration=FAST_TIME_LIMIT,
    silo_mode=SiloMode.REGION,
)
def noop():
    """Do nothing; placeholder task mapped to `OrderedTask.NONE` in `TASK_MAP`."""
# Lookup table from each ordered step of the relocation pipeline to the Celery task
# that implements it. `OrderedTask.NONE` maps to a no-op placeholder.
TASK_MAP: dict[OrderedTask, Task] = {
    OrderedTask.NONE: noop,
    OrderedTask.UPLOADING_START: uploading_start,
    OrderedTask.UPLOADING_COMPLETE: uploading_complete,
    OrderedTask.PREPROCESSING_SCAN: preprocessing_scan,
    OrderedTask.PREPROCESSING_TRANSFER: preprocessing_transfer,
    OrderedTask.PREPROCESSING_BASELINE_CONFIG: preprocessing_baseline_config,
    OrderedTask.PREPROCESSING_COLLIDING_USERS: preprocessing_colliding_users,
    OrderedTask.PREPROCESSING_COMPLETE: preprocessing_complete,
    OrderedTask.VALIDATING_START: validating_start,
    OrderedTask.VALIDATING_POLL: validating_poll,
    OrderedTask.VALIDATING_COMPLETE: validating_complete,
    OrderedTask.IMPORTING: importing,
    OrderedTask.POSTPROCESSING: postprocessing,
    OrderedTask.NOTIFYING_UNHIDE: notifying_unhide,
    OrderedTask.NOTIFYING_USERS: notifying_users,
    OrderedTask.NOTIFYING_OWNER: notifying_owner,
    OrderedTask.COMPLETED: completed,
}
# Import-time sanity check: every `OrderedTask` member must have a TASK_MAP entry,
# and vice versa, so a newly-added step cannot be silently left unmapped.
assert set(OrderedTask._member_map_.keys()) == {k.name for k in TASK_MAP.keys()}
def get_first_task_for_step(target_step: Relocation.Step) -> Task | None:
    """
    Return the task for the lowest-ordered `OrderedTask` belonging to `target_step`.

    Returns None when no ordered task maps to the step, or when the earliest match is
    the `NONE` placeholder.
    """
    candidates = [task for task, step in TASK_TO_STEP.items() if step == target_step]
    if not candidates:
        return None
    first = min(candidates, key=lambda task: task.value)
    if first == OrderedTask.NONE:
        return None
    return TASK_MAP.get(first, None)
| NextTask |
python | bokeh__bokeh | src/bokeh/plotting/gmap.py | {
"start": 4534,
"end": 4926
} | class ____(BaseFigureOptions):
pass
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| GMapFigureOptions |
python | apache__airflow | providers/google/tests/unit/google/cloud/utils/test_datafusion.py | {
"start": 921,
"end": 1477
} | class ____:
@pytest.mark.parametrize(
("str_value", "expected_item"),
[
("batch", DataFusionPipelineType.BATCH),
("stream", DataFusionPipelineType.STREAM),
],
)
def test_from_str(self, str_value, expected_item):
assert DataFusionPipelineType.from_str(str_value) == expected_item
def test_from_str_error(self):
with pytest.raises(ValueError, match="Invalid value 'non-existing value'."):
DataFusionPipelineType.from_str("non-existing value")
| TestDataFusionPipelineType |
python | tensorflow__tensorflow | tensorflow/python/eager/monitoring.py | {
"start": 12559,
"end": 12898
} | class ____(object):
"""Bucketing strategies for the samplers."""
__slots__ = ["buckets"]
def __init__(self, buckets):
"""Creates a new Buckets.
Args:
buckets: A c pointer of TFE_MonitoringBuckets.
"""
self.buckets = buckets
def __del__(self):
pywrap_tfe.TFE_MonitoringDeleteBuckets(self.buckets)
| Buckets |
python | walkccc__LeetCode | solutions/349. Intersection of Two Arrays/349.py | {
"start": 0,
"end": 239
} | class ____:
def intersection(self, nums1: list[int], nums2: list[int]) -> list[int]:
ans = []
nums1 = set(nums1)
for num in nums2:
if num in nums1:
ans.append(num)
nums1.remove(num)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/nccl_ops_test.py | {
"start": 4504,
"end": 5267
} | class ____(NcclTestCase):
def testAllReduce(self):
self._Test(partial(_NcclAllReduce, nccl_ops.all_sum), lambda x, y: x + y)
self._Test(partial(_NcclAllReduce, nccl_ops.all_prod), lambda x, y: x * y)
self._Test(partial(_NcclAllReduce, nccl_ops.all_min), np.minimum)
self._Test(partial(_NcclAllReduce, nccl_ops.all_max), np.maximum)
def testAllSumGrad(self):
self._TestGradient(
partial(_NcclAllReduce, nccl_ops.all_sum), lambda x, y: x + y)
def testErrors(self):
with self.assertRaisesRegex(ValueError, 'Device assignment .* required'):
nccl_ops.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
with self.assertRaisesRegex(ValueError, 'Must pass >0 tensors'):
nccl_ops.all_sum([])
| AllReduceTest |
python | tensorflow__tensorflow | tensorflow/python/framework/errors_impl.py | {
"start": 12840,
"end": 13286
} | class ____(OpError):
"""Raised when the request does not have valid authentication credentials.
This exception is not currently used.
"""
def __init__(self, node_def, op, message, *args):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED, *args)
@tf_export("errors.ResourceExhaustedError")
| UnauthenticatedError |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/evaluator_evaluation.py | {
"start": 9620,
"end": 11015
} | class ____(BaseLlamaExamplePrediction):
"""
Pairwise evaluation example prediction class.
Args:
feedback (Optional[str]): The evaluator's feedback.
score (Optional[float]): The evaluator's score.
evaluation_source (EvaluationSource): If the evaluation came from original order or flipped; or inconclusive.
"""
feedback: str = Field(
default_factory=str,
description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.",
)
score: Optional[float] = Field(
default=None,
description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.",
)
evaluation_source: Optional[EvaluationSource] = Field(
default=None,
description=(
"Whether the evaluation comes from original, or flipped ordering. Can also be neither here indicating inconclusive judgement."
),
)
invalid_prediction: bool = Field(
default=False, description="Whether or not the prediction is a valid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason as to why prediction is invalid."
)
@property
def class_name(self) -> str:
"""Data example class name."""
return "PairwiseEvaluatorExamplePrediction"
| PairwiseEvaluatorExamplePrediction |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 44687,
"end": 45117
} | class ____(BaseModel, extra="forbid"):
"""
Geo filter request Matches coordinates inside the circle of `radius` and center with coordinates `center`
"""
center: "GeoPoint" = Field(
...,
description="Geo filter request Matches coordinates inside the circle of `radius` and center with coordinates `center`",
)
radius: float = Field(..., description="Radius of the area in meters")
| GeoRadius |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 11253,
"end": 11466
} | class ____(OverrideSampleOnClassTestsMixin,
TransactionTestCase):
"""
Run tests with Django TransactionTestCase
"""
| OverrideSampleOnClassTransactionTestCase |
python | jazzband__tablib | src/tablib/formats/_html.py | {
"start": 1952,
"end": 3570
} | class ____(HTMLParser):
def __init__(self, dataset, *args, table_id=None, **kwargs):
super().__init__(*args, **kwargs)
self.dset = dataset
self.table_id = table_id
self.table_found = False
self.table_open = False
self.thead_open = False
self.cell_open = False
self.headers = []
self.current_row = []
self.current_data = ''
def handle_starttag(self, tag, attrs):
if (
tag == 'table' and not self.table_found and
(not self.table_id or dict(attrs).get('id') == self.table_id)
):
self.table_open = True
self.table_found = True
elif self.table_open:
if tag == 'thead':
self.thead_open = True
elif tag in ['td', 'th']:
self.cell_open = True
def handle_endtag(self, tag):
if not self.table_open:
return
if tag == 'table':
self.table_open = False
elif tag == 'thead':
self.thead_open = False
self.dset.headers = self.headers
elif tag == 'tr' and self.current_row:
self.dset.append(self.current_row)
self.current_row = []
elif tag in ['td', 'th']:
if self.thead_open:
self.headers.append(self.current_data)
else:
self.current_row.append(self.current_data)
self.cell_open = False
self.current_data = ''
def handle_data(self, data):
if self.cell_open:
self.current_data += data
| TablibHTMLParser |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 2498,
"end": 2613
} | class ____(BaseModel):
x: str = Field(..., alias='y')
z: int
AliasModel(y=1, z=2)
x_alias = 'y'
| AliasModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 2512,
"end": 2819
} | class ____(models.Model):
new_field = models.CharField(max_length=10)
class Meta:
abstract = True
def __str__(self):
return self.new_field
@property
def my_brand_new_property(self):
return 1
def my_beautiful_method(self):
return 2
| AbstractTestModel4 |
python | walkccc__LeetCode | solutions/3377. Digit Operations to Make Two Integers Equal/3377.py | {
"start": 0,
"end": 1331
} | class ____:
def minOperations(self, n: int, m: int) -> int:
isPrime = self._sieveEratosthenes(10000)
if isPrime[n] or isPrime[m]:
return -1
return self._dijkstra(n, m, isPrime)
def _dijkstra(self, src: int, dst: int, isPrime: list[bool]) -> int:
seen = {src}
minHeap = [(src, src)] # (cost, num)
while minHeap:
cost, curr = heapq.heappop(minHeap)
if curr == dst:
return cost
s = list(str(curr))
for i, c in enumerate(s):
if c < '9':
s[i] = str(int(c) + 1)
nextNum = int(''.join(s))
if not isPrime[nextNum] and nextNum not in seen:
heapq.heappush(minHeap, (cost + nextNum, nextNum))
seen.add(nextNum)
s[i] = c
if c > '0' and not (i == 0 and c == '1'):
s[i] = str(int(c) - 1)
nextNum = int(''.join(s))
if not isPrime[nextNum] and nextNum not in seen:
heapq.heappush(minHeap, (cost + nextNum, nextNum))
seen.add(nextNum)
s[i] = c
return -1
def _sieveEratosthenes(self, n: int) -> list[bool]:
isPrime = [True] * n
isPrime[0] = False
isPrime[1] = False
for i in range(2, int(n**0.5) + 1):
if isPrime[i]:
for j in range(i * i, n, i):
isPrime[j] = False
return isPrime
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 67499,
"end": 68094
} | class ____(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
| TestDtypeFunctions |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 211173,
"end": 211522
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(BranchProtectionRuleConflict, graphql_name="node")
| BranchProtectionRuleConflictEdge |
python | Delgan__loguru | tests/test_pickling.py | {
"start": 463,
"end": 905
} | class ____:
def __init__(self, flushable=False, stoppable=False):
if flushable:
self.flush = self._flush
if stoppable:
self.stop = self._stop
self.wrote = ""
self.flushed = False
self.stopped = False
def write(self, message):
self.wrote += message
def _flush(self):
self.flushed = True
def _stop(self):
self.stopped = True
| StreamHandler |
python | ray-project__ray | python/ray/autoscaler/_private/aws/cloudwatch/cloudwatch_helper.py | {
"start": 670,
"end": 779
} | class ____(str, Enum):
AGENT = "agent"
DASHBOARD = "dashboard"
ALARM = "alarm"
| CloudwatchConfigType |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 36625,
"end": 39654
} | class ____:
def test_view_contents(self) -> None:
ds = create_test_data()
dt = DataTree(dataset=ds)
assert ds.identical(
dt.dataset
) # this only works because Dataset.identical doesn't check types
assert isinstance(dt.dataset, xr.Dataset)
def test_immutability(self) -> None:
# See issue https://github.com/xarray-contrib/datatree/issues/38
dt = DataTree.from_dict(
{
"/": None,
"/a": None,
},
name="root",
)
with pytest.raises(
AttributeError, match="Mutation of the DatasetView is not allowed"
):
dt.dataset["a"] = xr.DataArray(0)
with pytest.raises(
AttributeError, match="Mutation of the DatasetView is not allowed"
):
dt.dataset.update({"a": 0})
# TODO are there any other ways you can normally modify state (in-place)?
# (not attribute-like assignment because that doesn't work on Dataset anyway)
def test_methods(self) -> None:
ds = create_test_data()
dt = DataTree(dataset=ds)
assert ds.mean().identical(dt.dataset.mean())
assert isinstance(dt.dataset.mean(), xr.Dataset)
def test_arithmetic(self, create_test_datatree) -> None:
dt = create_test_datatree()
expected = create_test_datatree(modify=lambda ds: 10.0 * ds)[
"set1"
].to_dataset()
result = 10.0 * dt["set1"].dataset
assert result.identical(expected)
def test_init_via_type(self) -> None:
# from datatree GH issue https://github.com/xarray-contrib/datatree/issues/188
# xarray's .weighted is unusual because it uses type() to create a Dataset/DataArray
a = xr.DataArray(
np.random.rand(3, 4, 10),
dims=["x", "y", "time"],
coords={"area": (["x", "y"], np.random.rand(3, 4))},
).to_dataset(name="data")
dt = DataTree(dataset=a)
def weighted_mean(ds):
return ds.weighted(ds.area).mean(["x", "y"])
weighted_mean(dt.dataset)
def test_map_keep_attrs(self) -> None:
# test DatasetView.map(..., keep_attrs=...)
data = xr.DataArray([1, 2, 3], dims="x", attrs={"da": "attrs"})
ds = xr.Dataset({"data": data}, attrs={"ds": "attrs"})
dt = DataTree(ds)
def func_keep(ds):
# x.mean() removes the attrs of the data_vars
return ds.map(lambda x: x.mean(), keep_attrs=True)
result = xr.map_over_datasets(func_keep, dt)
expected = dt.mean(keep_attrs=True)
xr.testing.assert_identical(result, expected)
# DatasetView.map keeps attrs by default
def func(ds):
# ds.map and x.mean() both keep attrs by default
return ds.map(lambda x: x.mean())
result = xr.map_over_datasets(func, dt)
expected = dt.mean()
xr.testing.assert_identical(result, expected)
| TestDatasetView |
python | arrow-py__arrow | arrow/locales.py | {
"start": 110564,
"end": 112079
} | class ____(Locale):
names = ["lt", "lt-lt"]
past = "prieš {0}"
future = "po {0}"
and_word = "ir"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "dabar",
"second": "sekundės",
"seconds": "{0} sekundžių",
"minute": "minutės",
"minutes": "{0} minučių",
"hour": "valandos",
"hours": "{0} valandų",
"day": "dieną",
"days": "{0} dienų",
"week": "savaitės",
"weeks": "{0} savaičių",
"month": "mėnesio",
"months": "{0} mėnesių",
"year": "metų",
"years": "{0} metų",
}
month_names = [
"",
"sausis",
"vasaris",
"kovas",
"balandis",
"gegužė",
"birželis",
"liepa",
"rugpjūtis",
"rugsėjis",
"spalis",
"lapkritis",
"gruodis",
]
month_abbreviations = [
"",
"saus",
"vas",
"kovas",
"bal",
"geg",
"birž",
"liepa",
"rugp",
"rugs",
"spalis",
"lapkr",
"gr",
]
day_names = [
"",
"pirmadienis",
"antradienis",
"trečiadienis",
"ketvirtadienis",
"penktadienis",
"šeštadienis",
"sekmadienis",
]
day_abbreviations = [
"",
"pi",
"an",
"tr",
"ke",
"pe",
"še",
"se",
]
| LithuanianLocale |
python | faif__python-patterns | patterns/structural/decorator.py | {
"start": 1130,
"end": 1352
} | class ____(TextTag):
"""Wraps a tag in <b>"""
def __init__(self, wrapped: TextTag) -> None:
self._wrapped = wrapped
def render(self) -> str:
return f"<b>{self._wrapped.render()}</b>"
| BoldWrapper |
python | mlflow__mlflow | tests/conftest.py | {
"start": 4114,
"end": 29380
} | class ____:
path: Path
test_name: str
execution_time: float
_test_results: list[TestResult] = []
def pytest_sessionstart(session):
# Clear duration tracking state at the start of each session
_test_results.clear()
if IS_TRACING_SDK_ONLY:
return
import click
if uri := MLFLOW_TRACKING_URI.get():
click.echo(
click.style(
(
f"Environment variable {MLFLOW_TRACKING_URI} is set to {uri!r}, "
"which may interfere with tests."
),
fg="red",
)
)
def to_md_table(rows: list[list[str]]) -> str:
if not rows:
return ""
n = max(len(r) for r in rows)
rows = [r + [""] * (n - len(r)) for r in rows]
# Calculate column widths
widths = [max(len(row[i]) for row in rows) for i in range(n)]
def esc(s: str) -> str:
return s.replace("|", r"\|").replace("\n", "<br>")
# Format rows with proper padding
def format_row(row: list[str]) -> str:
cells = [esc(cell).ljust(width) for cell, width in zip(row, widths)]
return "| " + " | ".join(cells) + " |"
header = format_row(rows[0])
sep = "| " + " | ".join(["-" * w for w in widths]) + " |"
body = [format_row(row) for row in rows[1:]]
return "\n".join([header, sep, *body])
def generate_duration_stats() -> str:
"""Generate per-file duration statistics as markdown table."""
if not _test_results:
return ""
# Group results by file path
file_groups: defaultdict[Path, list[float]] = defaultdict(list)
for result in _test_results:
file_groups[result.path].append(result.execution_time)
rows = []
for path, test_times in file_groups.items():
rel_path = path.relative_to(Path.cwd()).as_posix()
total_dur = sum(test_times)
if total_dur < 1.0:
# Ignore files with total duration < 1s
continue
test_count = len(test_times)
min_test = min(test_times)
max_test = max(test_times)
avg_test = sum(test_times) / len(test_times)
rows.append((rel_path, total_dur, test_count, min_test, max_test, avg_test))
rows.sort(key=lambda r: r[1], reverse=True)
if not rows:
return ""
# Prepare data for markdown table (headers + data rows)
table_rows = [["Rank", "File", "Duration", "Tests", "Min", "Max", "Avg"]]
for idx, (path, dur, count, min_, max_, avg_) in enumerate(rows, 1):
table_rows.append(
[
str(idx),
f"`{path}`",
f"{dur:.2f}s",
str(count),
f"{min_:.3f}s",
f"{max_:.3f}s",
f"{avg_:.3f}s",
]
)
return to_md_table(table_rows)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item: pytest.Item, nextitem: pytest.Item | None):
start = time.perf_counter()
yield # This includes setup + call + teardown
duration = time.perf_counter() - start
_test_results.append(TestResult(path=item.path, test_name=item.name, execution_time=duration))
def pytest_runtest_setup(item):
markers = [mark.name for mark in item.iter_markers()]
if "requires_ssh" in markers and not item.config.getoption("--requires-ssh"):
pytest.skip("use `--requires-ssh` to run this test")
def fetch_pr_labels():
"""
Returns the labels associated with the current pull request.
"""
if "GITHUB_ACTIONS" not in os.environ:
return None
if os.environ.get("GITHUB_EVENT_NAME") != "pull_request":
return None
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
pr_data = json.load(f)
return [label["name"] for label in pr_data["pull_request"]["labels"]]
@pytest.hookimpl(hookwrapper=True)
def pytest_report_teststatus(report, config):
outcome = yield
if report.when == "call":
try:
import psutil
except ImportError:
return
(*rest, result) = outcome.get_result()
mem = psutil.virtual_memory()
mem_used = mem.used / 1024**3
mem_total = mem.total / 1024**3
disk = psutil.disk_usage("/")
disk_used = disk.used / 1024**3
disk_total = disk.total / 1024**3
outcome.force_result(
(
*rest,
(
f"{result} | "
f"MEM {mem_used:.1f}/{mem_total:.1f} GB | "
f"DISK {disk_used:.1f}/{disk_total:.1f} GB"
),
)
)
@pytest.hookimpl(hookwrapper=True)
def pytest_ignore_collect(collection_path, config):
outcome = yield
if not outcome.get_result() and config.getoption("ignore_flavors"):
# If not ignored by the default hook and `--ignore-flavors` specified
# Ignored files and directories must be included in dev/run-python-flavor-tests.sh
model_flavors = [
# Tests of flavor modules.
"tests/ag2",
"tests/agno",
"tests/anthropic",
"tests/autogen",
"tests/azureml",
"tests/bedrock",
"tests/catboost",
"tests/crewai",
"tests/dspy",
"tests/gemini",
"tests/groq",
"tests/h2o",
"tests/johnsnowlabs",
"tests/keras",
"tests/keras_core",
"tests/llama_index",
"tests/langchain",
"tests/langgraph",
"tests/lightgbm",
"tests/litellm",
"tests/mistral",
"tests/models",
"tests/onnx",
"tests/openai",
"tests/paddle",
"tests/pmdarima",
"tests/prophet",
"tests/pydantic_ai",
"tests/pyfunc",
"tests/pytorch",
"tests/strands",
"tests/haystack",
"tests/semantic_kernel",
"tests/sentence_transformers",
"tests/shap",
"tests/sklearn",
"tests/smolagents",
"tests/spacy",
"tests/spark",
"tests/statsmodels",
"tests/tensorflow",
"tests/transformers",
"tests/xgboost",
# Lazy loading test.
"tests/test_mlflow_lazily_imports_ml_packages.py",
# This test is included here because it imports many big libraries like tf, keras, etc.
"tests/tracking/fluent/test_fluent_autolog.py",
# Cross flavor autologging related tests.
"tests/autologging/test_autologging_safety_unit.py",
"tests/autologging/test_autologging_behaviors_unit.py",
"tests/autologging/test_autologging_behaviors_integration.py",
"tests/autologging/test_autologging_utils.py",
"tests/autologging/test_training_session.py",
]
relpath = os.path.relpath(str(collection_path))
relpath = relpath.replace(os.sep, posixpath.sep) # for Windows
if relpath in model_flavors:
outcome.force_result(True)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(session, config, items):
# Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers`
# results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as
# `before_request` on the application after the first request. To avoid this issue,
# execute `tests.server.test_prometheus_exporter` first by reordering the test items.
items.sort(key=lambda item: item.module.__name__ != "tests.server.test_prometheus_exporter")
# Select the tests to run based on the group and splits
if (splits := config.getoption("--splits")) and (group := config.getoption("--group")):
items[:] = items[(group - 1) :: splits]
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter, exitstatus, config):
yield
# Display per-file durations
if duration_stats := generate_duration_stats():
terminalreporter.write("\n")
header = "per-file durations (sorted)"
terminalreporter.write_sep("=", header)
terminalreporter.write(f"::group::{header}\n\n")
terminalreporter.write(duration_stats)
terminalreporter.write("\n\n::endgroup::\n")
terminalreporter.write("\n")
if (
# `uv run` was used to run tests
"UV" in os.environ
# Tests failed because of missing dependencies
and (errors := terminalreporter.stats.get("error"))
and any(re.search(r"ModuleNotFoundError|ImportError", str(e.longrepr)) for e in errors)
):
terminalreporter.write("\n")
terminalreporter.section("HINTS", yellow=True)
terminalreporter.write(
"To run tests with additional packages, use:\n"
" uv run --with <package> pytest ...\n\n"
"For multiple packages:\n"
" uv run --with <package1> --with <package2> pytest ...\n\n",
yellow=True,
)
# If there are failed tests, display a command to run them
if failed_test_reports := terminalreporter.stats.get("failed", []):
if len(failed_test_reports) <= 30:
ids = [repr(report.nodeid) for report in failed_test_reports]
else:
# Use dict.fromkeys to preserve the order
ids = list(dict.fromkeys(report.fspath for report in failed_test_reports))
terminalreporter.section("command to run failed tests")
terminalreporter.write(" ".join(["pytest"] + ids))
terminalreporter.write("\n" * 2)
if summary_path := os.environ.get("GITHUB_STEP_SUMMARY"):
summary_path = Path(summary_path).resolve()
with summary_path.open("a") as f:
f.write("## Failed tests\n")
f.write("Run the following command to run the failed tests:\n")
f.write("```bash\n")
f.write(" ".join(["pytest"] + ids) + "\n")
f.write("```\n\n")
# If some tests failed at installing mlflow, we suggest using `--serve-wheel` flag.
# Some test cases try to install mlflow via pip e.g. model loading. They pins
# mlflow version to install based on local environment i.e. dev version ahead of
# the latest release, hence it's not found on PyPI. `--serve-wheel` flag was
# introduced to resolve this issue, which starts local PyPI server and serve
# an mlflow wheel based on local source code.
# Ref: https://github.com/mlflow/mlflow/pull/10247
msg = f"No matching distribution found for mlflow=={VERSION}"
for rep in failed_test_reports:
if any(msg in t for t in (rep.longreprtext, rep.capstdout, rep.capstderr)):
terminalreporter.section("HINTS", yellow=True)
terminalreporter.write(
f"Found test(s) that failed with {msg!r}. Adding"
" --serve-wheel` flag to your pytest command may help.\n\n",
yellow=True,
)
break
main_thread = threading.main_thread()
if threads := [t for t in threading.enumerate() if t is not main_thread]:
terminalreporter.section("Remaining threads", yellow=True)
for idx, thread in enumerate(threads, start=1):
terminalreporter.write(f"{idx}: {thread}\n")
# Uncomment this block to print tracebacks of non-daemon threads
# if non_daemon_threads := [t for t in threads if not t.daemon]:
# frames = sys._current_frames()
# terminalreporter.section("Tracebacks of non-daemon threads", yellow=True)
# for thread in non_daemon_threads:
# thread.join(timeout=1)
# if thread.is_alive() and (frame := frames.get(thread.ident)):
# terminalreporter.section(repr(thread), sep="~")
# terminalreporter.write("".join(traceback.format_stack(frame)))
try:
import psutil
except ImportError:
pass
else:
current_process = psutil.Process()
if children := current_process.children(recursive=True):
terminalreporter.section("Remaining child processes", yellow=True)
for idx, child in enumerate(children, start=1):
terminalreporter.write(f"{idx}: {child}\n")
# Test fixtures from tests/conftest.py
@pytest.fixture(autouse=IS_TRACING_SDK_ONLY, scope="session")
def remote_backend_for_tracing_sdk_test():
"""
A fixture to start a remote backend for testing mlflow-tracing package integration.
Since the tracing SDK has to be tested in an environment that has minimal dependencies,
we need to start a tracking backend in an isolated uv environment.
"""
port = get_safe_port()
# Start a remote backend to test mlflow-tracing package integration.
with tempfile.TemporaryDirectory() as temp_dir:
mlflow_root = os.path.dirname(os.path.dirname(__file__))
with subprocess.Popen(
[
"uv",
"run",
"--directory",
# Install from the dev version
mlflow_root,
"mlflow",
"server",
"--port",
str(port),
],
cwd=temp_dir,
) as process:
print("Starting mlflow server on port 5000") # noqa: T201
try:
for _ in range(60):
try:
response = requests.get(f"http://localhost:{port}")
if response.ok:
break
except requests.ConnectionError:
print("MLflow server is not responding yet.") # noqa: T201
time.sleep(1)
else:
raise RuntimeError("Failed to start server")
mlflow.set_tracking_uri(f"http://localhost:{port}")
yield
finally:
process.terminate()
@pytest.fixture(autouse=IS_TRACING_SDK_ONLY)
def tmp_experiment_for_tracing_sdk_test(monkeypatch):
    """Create a uniquely named experiment per test and purge its traces afterwards."""
    unique_name = f"trace-unit-test-{uuid.uuid4().hex}"
    exp = mlflow.set_experiment(unique_name)
    # Keep HTTP retries minimal so failures surface quickly during tests.
    monkeypatch.setenv("MLFLOW_HTTP_REQUEST_MAX_RETRIES", "1")
    yield
    purge_traces(experiment_id=exp.experiment_id)
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def tracking_uri_mock(tmp_path, request):
    """Point tracking at a per-test SQLite database unless the test opts out.

    Tests marked ``notrackingurimock`` receive ``None`` and keep whatever URI
    is already configured.
    """
    if "notrackingurimock" in request.keywords:
        yield None
    else:
        db_file = tmp_path / f"{uuid.uuid4().hex}.sqlite"
        uri = path_to_local_sqlite_uri(db_file)
        with _use_tracking_uri(uri):
            yield uri
@pytest.fixture(autouse=True)
def reset_active_experiment_id():
    """Clear the fluent-API active experiment and its env override after each test."""
    yield
    os.environ.pop("MLFLOW_EXPERIMENT_ID", None)
    mlflow.tracking.fluent._active_experiment_id = None
@pytest.fixture(autouse=True)
def reset_mlflow_uri():
    """Drop tracking/registry URI overrides after each test.

    Resetting these environment variables causes sqlalchemy store tests to run
    against a sqlite database instead of mysql/postgresql/mssql, so the reset can
    be disabled via DISABLE_RESET_MLFLOW_URI_FIXTURE.
    """
    yield
    if "DISABLE_RESET_MLFLOW_URI_FIXTURE" in os.environ:
        return
    os.environ.pop("MLFLOW_TRACKING_URI", None)
    os.environ.pop("MLFLOW_REGISTRY_URI", None)
    try:
        from mlflow.tracking import set_registry_uri

        # Clean up the registry URI to avoid side effects.
        set_registry_uri(None)
    except ImportError:
        # The tracing-only SDK ships without the registry module.
        pass
@pytest.fixture(autouse=True)
def reset_tracing():
    """
    Reset the global state of the tracing feature.
    This fixture is auto-applied for cleaning up the global state between tests
    to avoid side effects.
    """
    yield
    # Reset OpenTelemetry and MLflow tracer setup
    mlflow.tracing.reset()
    # Clear other global state and singletons
    _set_last_active_trace_id(None)
    _TRACE_BUFFER.clear()
    InMemoryTraceManager.reset()
    IPythonTraceDisplayHandler._instance = None
    # Reset opentelemetry tracer provider as well.
    # NOTE(review): this reaches into OpenTelemetry private attributes
    # (_TRACER_PROVIDER_SET_ONCE / _TRACER_PROVIDER), so it is coupled to the
    # installed opentelemetry-api version — confirm when upgrading that dependency.
    trace_api._TRACER_PROVIDER_SET_ONCE._done = False
    trace_api._TRACER_PROVIDER = None
def _is_span_active():
    """Return True when a real (recording) span is attached to the current OTel context."""
    current = trace_api.get_current_span()
    if current is None:
        return False
    return not isinstance(current, trace_api.NonRecordingSpan)
@pytest.fixture(autouse=True)
def validate_trace_finish():
    """
    Validate all spans are finished and detached from the context by the end of each test.
    A leaked span is a critical problem and also hard to find without an explicit check.
    """
    # When a span is leaked, it causes confusing test failures in subsequent tests. To avoid
    # this and make the test failure clearer, skip (rather than fail) the current test when
    # the leak originated in an earlier test.
    if _is_span_active():
        pytest.skip(reason="A leaked active span is found before starting the test.")
    yield
    # After the test body, no span may remain attached; this fails the *leaking* test itself.
    assert not _is_span_active(), (
        "A span is still active at the end of the test. All spans must be finished "
        "and detached from the context before the test ends. The leaked span context "
        "may cause other subsequent tests to fail."
    )
@pytest.fixture(autouse=True, scope="session")
def enable_test_mode_by_default_for_autologging_integrations():
    """
    Run all MLflow tests in autologging test mode, ensuring that errors in autologging patch code
    are raised and detected. For more information about autologging test mode, see the docstring
    for :py:func:`mlflow.utils.autologging_utils._is_testing()`.
    """
    # Delegate both setup and teardown to the shared enable_test_mode generator.
    yield from enable_test_mode()
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def clean_up_leaked_runs():
    """
    Certain test cases validate safety API behavior when runs are leaked. Leaked runs that
    are not cleaned up between test cases may result in cascading failures that are hard to
    debug. Accordingly, this fixture fails the test when it leaks a run, then ends any
    remaining active runs so later tests start from a clean state.
    """
    try:
        yield
        # Query once instead of the original's three separate mlflow.active_run()
        # calls, and format with an f-string for consistency with the rest of the file.
        run = mlflow.active_run()
        assert not run, (
            f"test case unexpectedly leaked a run. Run info: {run.info}. "
            f"Run data: {run.data}"
        )
    finally:
        # Runs may be nested; end them all.
        while mlflow.active_run():
            mlflow.end_run()
def _called_in_save_model():
for frame in inspect.stack()[::-1]:
if frame.function == "save_model":
return True
return False
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def prevent_infer_pip_requirements_fallback(request):
    """
    Prevents `mlflow.models.infer_pip_requirements` from falling back in `mlflow.*.save_model`
    unless explicitly disabled via `pytest.mark.allow_infer_pip_requirements_fallback`.
    """
    from mlflow.utils.environment import _INFER_PIP_REQUIREMENTS_GENERAL_ERROR_MESSAGE

    # Stub for _logger.exception: raise only for the specific fallback message emitted
    # from within save_model; all other exception logs are silently swallowed by the stub.
    def new_exception(msg, *_, **__):
        if msg == _INFER_PIP_REQUIREMENTS_GENERAL_ERROR_MESSAGE and _called_in_save_model():
            # NOTE(review): the concatenated message lacks a space between "in" and
            # the backtick; cosmetic only, left unchanged here.
            raise Exception(
                "`mlflow.models.infer_pip_requirements` should not fall back in"
                "`mlflow.*.save_model` during test"
            )

    if "allow_infer_pip_requirements_fallback" not in request.keywords:
        with mock.patch("mlflow.utils.environment._logger.exception", new=new_exception):
            yield
    else:
        yield
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def clean_up_mlruns_directory(request):
    """
    On CI only, remove the `mlruns` directory on each test module teardown to save disk space.
    """
    yield
    if "GITHUB_ACTIONS" not in os.environ:
        # Local runs keep their mlruns directory.
        return
    mlruns_dir = os.path.join(request.config.rootpath, "mlruns")
    if not os.path.exists(mlruns_dir):
        return
    try:
        shutil.rmtree(mlruns_dir)
    except OSError:
        if is_windows():
            raise
        # `shutil.rmtree` can't remove files owned by root in a docker container;
        # escalate with sudo on non-Windows platforms.
        subprocess.check_call(["sudo", "rm", "-rf", mlruns_dir])
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def clean_up_last_logged_model_id():
    """
    Clean up the last logged model ID stored in a thread local var.
    """
    # No yield: the whole fixture runs at setup, before each test body.
    _reset_last_logged_model_id()
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def clean_up_last_active_run():
    """Reset the tracked last-active-run id before each test (runs entirely at setup)."""
    _last_active_run_id.set(None)
@pytest.fixture(scope="module", autouse=not IS_TRACING_SDK_ONLY)
def clean_up_envs():
    """
    Clean up virtualenvs and conda environments created during tests to save disk space.
    Only active on CI (GITHUB_ACTIONS).
    """
    yield
    if "GITHUB_ACTIONS" not in os.environ:
        return
    from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root

    shutil.rmtree(_get_mlflow_virtualenv_root(), ignore_errors=True)
    if is_windows():
        return
    conda_info = json.loads(subprocess.check_output(["conda", "info", "--json"], text=True))
    base_prefix = conda_info["root_prefix"]
    # MLflow-created envs are named "mlflow-<32+ hex chars>"; never touch the base env.
    name_pattern = re.compile(r"mlflow-\w{32,}")
    for env_path in conda_info["envs"]:
        if env_path == base_prefix:
            continue
        if name_pattern.fullmatch(os.path.basename(env_path)):
            shutil.rmtree(env_path, ignore_errors=True)
@pytest.fixture(scope="session", autouse=True)
def enable_mlflow_testing():
    """Set the _MLFLOW_TESTING environment variable for the entire test session."""
    # MonkeyPatch.context() restores the env var automatically at session teardown.
    with pytest.MonkeyPatch.context() as mp:
        mp.setenv(_MLFLOW_TESTING.name, "TRUE")
        yield
@pytest.fixture(scope="session", autouse=not IS_TRACING_SDK_ONLY)
def serve_wheel(request, tmp_path_factory):
    """
    Models logged during tests have a dependency on the dev version of MLflow built from
    source (e.g., mlflow==1.20.0.dev0) and cannot be served because the dev version is not
    available on PyPI. This fixture serves a wheel for the dev version from a temporary
    PyPI repository running on localhost and appends the repository URL to the
    `PIP_EXTRA_INDEX_URL` environment variable to make the wheel available to pip.
    """
    from tests.helper_functions import get_safe_port

    # Merged the two duplicated early-return guards (Copilot agent runs, or the
    # --serve-wheel option disabled); a generator fixture must still yield once.
    if "COPILOT_AGENT_ACTION" in os.environ or not request.config.getoption("--serve-wheel"):
        yield
        return
    root = tmp_path_factory.mktemp("root")
    mlflow_dir = root.joinpath("mlflow")
    mlflow_dir.mkdir()
    port = get_safe_port()
    try:
        repo_root = subprocess.check_output(
            [
                "git",
                "rev-parse",
                "--show-toplevel",
            ],
            text=True,
        ).strip()
    except subprocess.CalledProcessError:
        # Some tests run in a Docker container where git is not installed.
        # In this case, assume we're in the root of the repo.
        repo_root = "."
    # Build the dev wheel into the directory the local index will serve.
    subprocess.check_call(
        [
            sys.executable,
            "-m",
            "pip",
            "wheel",
            "--wheel-dir",
            mlflow_dir,
            "--no-deps",
            repo_root,
        ],
    )
    # Serve the wheel directory over HTTP for the rest of the session.
    with subprocess.Popen(
        [
            sys.executable,
            "-m",
            "http.server",
            str(port),
        ],
        cwd=root,
    ) as prc:
        try:
            url = f"http://localhost:{port}"
            if existing_url := os.environ.get("PIP_EXTRA_INDEX_URL"):
                url = f"{existing_url} {url}"
            os.environ["PIP_EXTRA_INDEX_URL"] = url
            # Set the `UV_INDEX` environment variable to allow fetching the wheel from the
            # url when using `uv` as environment manager
            os.environ["UV_INDEX"] = f"mlflow={url}"
            yield
        finally:
            prc.terminate()
@pytest.fixture
def mock_s3_bucket():
    """
    Create a moto-mocked S3 bucket for the duration of the test.

    Yields:
        The name of the mock bucket.
    """
    import boto3
    import moto

    with moto.mock_s3():
        name = "mock-bucket"
        boto3.client("s3").create_bucket(Bucket=name)
        yield name
@pytest.fixture
def tmp_sqlite_uri(tmp_path):
    """Return a sqlite:// URI pointing at a temp database file (Windows needs fewer slashes)."""
    file_uri = tmp_path.joinpath("mlflow.db").as_uri()
    scheme = "sqlite://" if is_windows() else "sqlite:////"
    return scheme + file_uri[len("file://") :]
@pytest.fixture
def mock_databricks_serving_with_tracing_env(monkeypatch):
    """Simulate a Databricks model-serving environment with tracing enabled via env vars."""
    monkeypatch.setenv("IS_IN_DB_MODEL_SERVING_ENV", "true")
    monkeypatch.setenv("ENABLE_MLFLOW_TRACING", "true")
@pytest.fixture(params=[True, False])
def mock_is_in_databricks(request):
    """Parametrized fixture: run the test twice, with is_in_databricks_runtime mocked
    to return True and then False; yields the mock for call assertions."""
    with mock.patch(
        "mlflow.models.model.is_in_databricks_runtime", return_value=request.param
    ) as mock_databricks:
        yield mock_databricks
@pytest.fixture(autouse=not IS_TRACING_SDK_ONLY)
def reset_active_model_context():
    """Clear the globally active model after each test to avoid cross-test leakage."""
    yield
    clear_active_model()
@pytest.fixture(autouse=True)
def clean_up_telemetry_threads():
    """Tear down the telemetry client's background resources after each test, if one exists."""
    yield
    client = get_telemetry_client()
    if client:
        client._clean_up()
| TestResult |
python | euske__pdfminer | pdfminer/pdftypes.py | {
"start": 4223,
"end": 8547
} | class ____(PDFObject):
def __init__(self, attrs, rawdata, decipher=None):
assert isinstance(attrs, dict)
self.attrs = attrs
self.rawdata = rawdata
self.decipher = decipher
self.data = None
self.objid = None
self.genno = None
return
def set_objid(self, objid, genno):
self.objid = objid
self.genno = genno
return
def __repr__(self):
if self.data is None:
assert self.rawdata is not None
return '<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs)
else:
assert self.data is not None
return '<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs)
def __contains__(self, name):
return name in self.attrs
def __getitem__(self, name):
return self.attrs[name]
def get(self, name, default=None):
return self.attrs.get(name, default)
def get_any(self, names, default=None):
for name in names:
if name in self.attrs:
return self.attrs[name]
return default
def get_filters(self):
filters = self.get_any(('F', 'Filter'))
params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
if not filters:
return []
if not isinstance(filters, list):
filters = [filters]
if not isinstance(params, list):
# Make sure the parameters list is the same as filters.
params = [params]*len(filters)
if STRICT and len(params) != len(filters):
raise PDFException("Parameters len filter mismatch")
return zip(filters, params)
def decode(self):
assert self.data is None and self.rawdata is not None
data = self.rawdata
if self.decipher:
# Handle encryption
data = self.decipher(self.objid, self.genno, data, self.attrs)
filters = self.get_filters()
if not filters:
self.data = data
self.rawdata = None
return
for (f,params) in filters:
if f in LITERALS_FLATE_DECODE:
# will get errors if the document is encrypted.
try:
data = zlib.decompress(data)
except zlib.error as e:
if STRICT:
raise PDFException('Invalid zlib bytes: %r, %r' % (e, data))
data = b''
elif f in LITERALS_LZW_DECODE:
data = lzwdecode(data)
elif f in LITERALS_ASCII85_DECODE:
data = ascii85decode(data)
elif f in LITERALS_ASCIIHEX_DECODE:
data = asciihexdecode(data)
elif f in LITERALS_RUNLENGTH_DECODE:
data = rldecode(data)
elif f in LITERALS_CCITTFAX_DECODE:
data = ccittfaxdecode(data, params)
elif f in LITERALS_DCT_DECODE:
# This is probably a JPG stream - it does not need to be decoded twice.
# Just return the stream to the user.
pass
elif f == LITERAL_CRYPT:
# not yet..
raise PDFNotImplementedError('/Crypt filter is unsupported')
else:
raise PDFNotImplementedError('Unsupported filter: %r' % f)
# apply predictors
if 'Predictor' in params:
pred = int_value(params['Predictor'])
if pred == 1:
# no predictor
pass
elif 10 <= pred:
# PNG predictor
colors = int_value(params.get('Colors', 1))
columns = int_value(params.get('Columns', 1))
bitspercomponent = int_value(params.get('BitsPerComponent', 8))
data = apply_png_predictor(pred, colors, columns, bitspercomponent, data)
else:
raise PDFNotImplementedError('Unsupported predictor: %r' % pred)
self.data = data
self.rawdata = None
return
def get_data(self):
if self.data is None:
self.decode()
return self.data
def get_rawdata(self):
return self.rawdata
| PDFStream |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.