language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 10375,
"end": 13108
} | class ____(SimpleTestCase):
class Choices(models.IntegerChoices):
A = 1
def test_integerfield_cleans_valid_string(self):
f = models.IntegerField()
self.assertEqual(f.clean("2", None), 2)
def test_integerfield_raises_error_on_invalid_intput(self):
f = models.IntegerField()
with self.assertRaises(ValidationError):
f.clean("a", None)
def test_choices_validation_supports_named_groups(self):
f = models.IntegerField(choices=(("group", ((10, "A"), (20, "B"))), (30, "C")))
self.assertEqual(10, f.clean(10, None))
def test_choices_validation_supports_named_groups_dicts(self):
f = models.IntegerField(choices={"group": ((10, "A"), (20, "B")), 30: "C"})
self.assertEqual(10, f.clean(10, None))
def test_choices_validation_supports_named_groups_nested_dicts(self):
f = models.IntegerField(choices={"group": {10: "A", 20: "B"}, 30: "C"})
self.assertEqual(10, f.clean(10, None))
def test_nullable_integerfield_raises_error_with_blank_false(self):
f = models.IntegerField(null=True, blank=False)
with self.assertRaises(ValidationError):
f.clean(None, None)
def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
f = models.IntegerField(null=True, blank=True)
self.assertIsNone(f.clean(None, None))
def test_integerfield_raises_error_on_empty_input(self):
f = models.IntegerField(null=False)
with self.assertRaises(ValidationError):
f.clean(None, None)
with self.assertRaises(ValidationError):
f.clean("", None)
def test_integerfield_validates_zero_against_choices(self):
f = models.IntegerField(choices=((1, 1),))
with self.assertRaises(ValidationError):
f.clean("0", None)
def test_enum_choices_cleans_valid_string(self):
f = models.IntegerField(choices=self.Choices)
self.assertEqual(f.clean("1", None), 1)
def test_enum_choices_invalid_input(self):
f = models.IntegerField(choices=self.Choices)
with self.assertRaises(ValidationError):
f.clean("A", None)
with self.assertRaises(ValidationError):
f.clean("3", None)
def test_callable_choices(self):
def get_choices():
return {i: str(i) for i in range(3)}
f = models.IntegerField(choices=get_choices)
for i in get_choices():
with self.subTest(i=i):
self.assertEqual(i, f.clean(i, None))
with self.assertRaises(ValidationError):
f.clean("A", None)
with self.assertRaises(ValidationError):
f.clean("3", None)
| ValidationTests |
python | keras-team__keras | keras/src/trainers/data_adapters/tf_dataset_adapter_test.py | {
"start": 297,
"end": 13691
} | class ____(testing.TestCase):
def test_basic_flow(self):
x = tf.random.normal((34, 4))
y = tf.random.normal((34, 2))
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, None)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
for i, batch in enumerate(it):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
def _test_class_weights(self, target_encoding="int"):
x = np.random.random((4, 2))
if target_encoding == "int":
y = np.array([[0], [1], [2], [3]], dtype="int64")
else:
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
class_weight = {
0: 0.1,
1: 0.2,
2: 0.3,
3: 0.4,
}
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
self.assertEqual(len(batch), 3)
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_class_weights_int_targets(self):
self._test_class_weights(target_encoding="int")
def test_class_weights_categorical_targets(self):
self._test_class_weights(target_encoding="categorical")
def test_builtin_prefetch(self):
dataset = tf.data.Dataset.range(42)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertTrue(adapter.builtin_prefetch)
def test_num_batches(self):
dataset = tf.data.Dataset.range(42)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, 42)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertEqual(adapter.num_batches, 42)
# Test for Infinite Cardinality
dataset = tf.data.Dataset.range(42)
dataset = dataset.repeat()
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.INFINITE_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
# Test for Unknown Cardinality
dataset = dataset.filter(lambda x: True)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.UNKNOWN_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
def test_invalid_dataset_type(self):
with self.assertRaisesRegex(
ValueError, "Expected argument `dataset` to be a tf.data.Dataset"
):
invalid_data = "This is not a tf.data.Dataset"
tf_dataset_adapter.TFDatasetAdapter(invalid_data)
def test_class_weight_and_sample_weight_together(self):
x = np.random.random((4, 2))
y = np.array([[0], [1], [2], [3]], dtype="int64")
sw = np.array([0.5, 0.5, 0.5, 0.5])
base_ds = tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(16)
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
with self.assertRaisesRegex(
ValueError,
"You cannot `class_weight` and `sample_weight` at the same time.",
):
tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
def test_different_y_shapes_with_class_weight(self):
x = np.random.random((4, 2))
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
y_sparse = np.array([0, 1, 2, 3], dtype="int64")
base_ds = tf.data.Dataset.from_tensor_slices((x, y_sparse)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_nested_y_with_class_weight(self):
x = np.random.random((4, 2))
# Define two target outputs, y1 and y2, for the dataset
y1 = np.array([0, 1, 2, 3], dtype="int64")
y2 = np.array([0, 1, 2, 3], dtype="int64")
# Create a tf.data Dataset from the input data and two target outputs
base_ds = tf.data.Dataset.from_tensor_slices((x, (y1, y2))).batch(16)
# Define class weights for potential classes in the output
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
with self.assertRaisesRegex(
ValueError,
"`class_weight` is only supported for Models with a single output.",
):
tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
def test_class_weights_map_fn_with_sample_weight(self):
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
class_weights_map_fn = tf_dataset_adapter.make_class_weight_map_fn(
class_weight
)
x = np.array([[0.5, 0.5], [0.5, 0.5]])
y = np.array([[1, 0], [0, 1]])
sw = np.array([1.0, 1.0])
with self.assertRaisesRegex(
ValueError,
"You cannot `class_weight` and `sample_weight` at the same time.",
):
class_weights_map_fn(x, y, sw)
def test_class_weights_map_fn_nested_y(self):
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
class_weights_map_fn = tf_dataset_adapter.make_class_weight_map_fn(
class_weight
)
x = np.array([[0.5, 0.5]])
y1 = np.array([1])
y2 = np.array([0])
with self.assertRaisesRegex(
ValueError,
"`class_weight` is only supported for Models with a single output.",
):
class_weights_map_fn(x, (y1, y2))
def test_distribute_dataset(self):
x = tf.random.normal((34, 4))
y = tf.random.normal((34, 2))
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
data_distribution = mock.Mock()
# Mimic that there are 2 worker, and each of the worker will get batch
# size of 8
data_distribution.distribute_dataset = mock.MagicMock(
return_value=base_ds.rebatch(8).shard(2, index=0)
)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, distribution=data_distribution
)
self.assertEqual(adapter.num_batches, None)
self.assertEqual(adapter.batch_size, None)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
gen = adapter.get_numpy_iterator()
for i, batch in enumerate(gen):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, "float32")
if i < 2:
self.assertEqual(bx.shape, (8, 4))
self.assertEqual(by.shape, (8, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
ds = adapter.get_tf_dataset()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, "float32")
if i < 2:
self.assertEqual(tuple(bx.shape), (8, 4))
self.assertEqual(tuple(by.shape), (8, 2))
else:
self.assertEqual(tuple(bx.shape), (2, 4))
self.assertEqual(tuple(by.shape), (2, 2))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS and backend.backend() != "numpy",
reason="Backend does not support sparse tensors",
)
def test_tf_sparse_tensors(self):
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 4)
)
y = tf.SparseTensor(
indices=[[0, 0], [1, 1]], values=[3.0, 4.0], dense_shape=(2, 2)
)
base_ds = tf.data.Dataset.from_tensors((x, y))
adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.SparseTensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = jax.experimental.sparse.BCOO
for batch in it:
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
def test_distributed_datasets_from_function_adapter_properties(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0"])
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(
global_batch_size=2
)
x = tf.random.uniform((32, 4))
y = tf.random.uniform((32, 2))
return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
adapter = tf_dataset_adapter.TFDatasetAdapter(dist_dataset)
self.assertEqual(adapter.num_batches, 16)
self.assertIsNone(adapter.batch_size)
self.assertIsNone(adapter.has_partial_batch)
self.assertIsNone(adapter.partial_batch_size)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
batch_count = 0
for batch in it:
batch_count += 1
self.assertEqual(len(batch), 2)
data, labels = batch
self.assertIsInstance(data, expected_class)
self.assertIsInstance(labels, expected_class)
self.assertEqual(data.shape, (2, 4))
self.assertEqual(labels.shape, (2, 2))
self.assertEqual(batch_count, 16)
@pytest.mark.requires_trainable_backend
def test_distributed_datasets_from_function_model_integration(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0"])
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(
global_batch_size=2
)
x = tf.random.uniform((4, 1))
y = tf.random.uniform((4, 2))
return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
model = Sequential([layers.Dense(2, input_shape=(1,))])
model.compile(optimizer="adam", loss="mse")
history = model.fit(dist_dataset, epochs=1)
self.assertIn("loss", history.history)
| TestTFDatasetAdapter |
python | huggingface__transformers | src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py | {
"start": 1428,
"end": 15150
} | class ____(BaseImageProcessor):
r"""
Constructs a MobileNetV1 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 256}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["MobileNetV1ImageProcessor"]
| MobileNetV1ImageProcessor |
python | django__django | tests/migrations/models.py | {
"start": 126,
"end": 205
} | class ____(models.Model, metaclass=CustomModelBase):
pass
| ModelWithCustomBase |
python | scipy__scipy | scipy/special/tests/test_kolmogorov.py | {
"start": 15773,
"end": 18502
} | class ____:
def test_nan(self):
assert_(np.isnan(kolmogi(np.nan)))
def test_basic(self):
dataset = [(1.0, 0),
(0.96394524366487511, 0.5),
(0.9, 0.571173265106),
(0.5000000000000000, 0.8275735551899077),
(0.26999967167735456, 1),
(0.00067092525577969533, 2)]
dataset = np.asarray(dataset)
FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
def test_smallpcdf(self):
epsilon = 0.5 ** np.arange(1, 55, 3)
# kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise
# Use epsilon s.t. 1-(1-epsilon)) == epsilon,
# so can use same x-array for both results
x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941,
0.3736868442620478, 0.3345161714909591, 0.3057833329315859,
0.2835052890528936, 0.2655578150208676, 0.2506869966107999,
0.2380971058736669, 0.2272549289962079, 0.2177876361600040,
0.2094254686862041, 0.2019676748836232, 0.1952612948137504,
0.1891874239646641, 0.1836520225050326, 0.1785795904846466])
dataset = np.column_stack([1-epsilon, x])
FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
dataset = np.column_stack([epsilon, x])
FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()
def test_smallpsf(self):
epsilon = 0.5 ** np.arange(1, 55, 3)
# kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise
# Use epsilon s.t. 1-(1-epsilon)) == epsilon,
# so can use same x-array for both results
x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343,
1.9525136345289607, 2.2027324540033235, 2.4272929437460848,
2.6327688477341593, 2.8233300509220260, 3.0018183401530627,
3.1702735084088891, 3.3302184446307912, 3.4828258153113318,
3.6290214150152051, 3.7695513262825959, 3.9050272690877326,
4.0359582187082550, 4.1627730557884890, 4.2858371743264527])
dataset = np.column_stack([epsilon, x])
FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
dataset = np.column_stack([1-epsilon, x])
FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()
def test_round_trip(self):
def _k_ki(_p):
return kolmogorov(kolmogi(_p))
p = np.linspace(0.1, 1.0, 10, endpoint=True)
dataset = np.column_stack([p, p])
FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check()
| TestKolmogi |
python | pytorch__pytorch | torch/fx/passes/graph_transform_observer.py | {
"start": 448,
"end": 7785
} | class ____:
__pass_count = 0
def __init__(
self,
gm: GraphModule,
passname: str,
subsystem: Optional[str] = None,
log_url: Optional[str] = None,
):
"""
log_url is inferred to be torch._inductor.config.trace.log_url_for_graph_xform unless otherwise specified
"""
from torch._inductor import config as inductor_config
self.gm = gm
self.passname = passname
self.subsystem = subsystem
if log_url is None:
log_url = inductor_config.trace.log_url_for_graph_xform
self.log_url = log_url
self.active = (
self.log_url is not None
or inductor_config.trace.provenance_tracking_level == 1
)
if self.active:
self.erased_nodes: set[str] = set()
self.created_nodes: set[str] = set()
self.name_to_node: dict[str, Node] = {}
# record graph modules deepcopied from self.gm, so we can remove hooks on them when exiting the context
self.copied_gms: list[GraphModule] = []
self._node_creation_hook = self.get_node_creation_hook()
self._node_erase_hook = self.get_node_erase_hook()
self._node_replace_hook = self.get_node_replace_hook()
self._deepcopy_hook = self.get_deepcopy_hook()
# If log_url is None, we don't log anything
if self.log_url is None:
return
GraphTransformObserver.__pass_count += 1
self.input_dot_graph = FxGraphDrawer(
self.gm,
self.passname,
ignore_getattr=True,
ignore_parameters_and_buffers=True,
).get_dot_graph()
@classmethod
def get_current_pass_count(cls):
return cls.__pass_count
def apply_gm_pass(self, pass_fn: Callable[[GraphModule], T]) -> Optional[T]:
with self:
if not self._check_disable_pass():
return pass_fn(self.gm)
return None
def apply_graph_pass(self, pass_fn: Callable[[Graph], T]) -> Optional[T]:
with self:
if not self._check_disable_pass():
return pass_fn(self.gm.graph)
return None
def _check_disable_pass(self):
if self.subsystem is None:
return False
debug_info = lambda: self.passname # noqa: E731
from torch._inductor.compiler_bisector import CompilerBisector
return CompilerBisector.disable_subsystem(
"inductor", self.subsystem, debug_info
)
def __enter__(self):
if not self.active:
return self
self.gm._register_create_node_hook(self._node_creation_hook)
self.gm._register_erase_node_hook(self._node_erase_hook)
self.gm._register_replace_node_hook(self._node_replace_hook)
self.gm._register_deepcopy_hook(self._deepcopy_hook)
self.erased_nodes.clear()
self.created_nodes.clear()
self.name_to_node.clear()
self.copied_gms.clear()
for node in self.gm.graph.nodes:
self.name_to_node[node.name] = node
return self
def __exit__(self, type, value, tb):
if not self.active:
return
for gm in self.copied_gms + [self.gm]:
gm._unregister_create_node_hook(self._node_creation_hook)
gm._unregister_erase_node_hook(self._node_erase_hook)
gm._unregister_replace_node_hook(self._node_replace_hook)
gm._unregister_deepcopy_hook(self._deepcopy_hook)
if self.log_url is None:
return
if len(self.created_nodes) > 0 or len(self.erased_nodes) > 0:
for e in self.input_dot_graph.get_node_list():
if e.get_name() in self.erased_nodes:
e.obj_dict["attributes"]["fillcolor"] = "yellow"
else:
e.obj_dict["attributes"]["fillcolor"] = "grey"
assert self.log_url is not None
self.input_dot_graph.write(
os.path.join(
self.log_url,
f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_input_graph.dot",
)
)
output_dot_graph = FxGraphDrawer(
self.gm,
self.passname,
ignore_getattr=True,
ignore_parameters_and_buffers=True,
).get_dot_graph()
for e in output_dot_graph.get_node_list():
if e.get_name() in self.created_nodes:
e.obj_dict["attributes"]["fillcolor"] = "yellow"
else:
e.obj_dict["attributes"]["fillcolor"] = "grey"
output_dot_graph.write(
os.path.join(
self.log_url,
f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_output_graph.dot",
)
)
def get_node_creation_hook(self):
# We have to return a function instead of using a class method directly
# to avoid max recursion issue when deepcopy a graph module within the context manager.
def on_node_creation(node):
self.created_nodes.add(node.name)
self.name_to_node[node.name] = node
source = NodeSource(None, self.passname, NodeSourceAction.CREATE)
if "from_node" not in node.meta:
node.meta["from_node"] = [source]
else:
node.meta["from_node"].append(source)
return on_node_creation
def get_node_erase_hook(self):
def on_node_erase(node):
self.erased_nodes.add(node.name)
self.name_to_node.pop(node.name, None)
return on_node_erase
def get_node_replace_hook(self):
def on_node_replace(old: Node, new: str, user: Node):
# Update node meta when replacing old node with new node
new_node = self.name_to_node.get(new, None)
if not new_node:
return
assert isinstance(new_node, Node)
# replace hook is called once for each user of old
# this avoids adding duplicated source nodes
added_nodes = {s.name for s in new_node.meta.get("from_node", [])}
if old.name in added_nodes:
return
action = [NodeSourceAction.REPLACE]
if new_node.name in self.created_nodes:
action.append(NodeSourceAction.CREATE)
def created_this_pass(source):
return source.pass_name == self.passname and source.action == [
NodeSourceAction.CREATE
]
# remove redundant source added on node creation
new_from_node = new_node.meta.get("from_node", [])
new_from_node = [
source for source in new_from_node if not created_this_pass(source)
]
# add new source
new_node_source = NodeSource(old, self.passname, action)
new_from_node.append(new_node_source)
new_node.meta["from_node"] = new_from_node
return on_node_replace
def get_deepcopy_hook(self):
def on_deepcopy(gm):
self.copied_gms.append(gm)
return on_deepcopy
| GraphTransformObserver |
python | plotly__plotly.py | plotly/graph_objs/isosurface/colorbar/title/_font.py | {
"start": 233,
"end": 9929
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.colorbar.title"
_path_str = "isosurface.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.isosurface.col
orbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | astropy__astropy | astropy/modeling/tests/test_mappings.py | {
"start": 3980,
"end": 5905
} | class ____:
def test___init__(self):
# Set values
model = UnitsMapping(
((u.m, None),),
input_units_equivalencies="test_eqiv",
input_units_allow_dimensionless=True,
name="test",
)
assert model._mapping == ((u.m, None),)
assert model._input_units_strict == {"x": True}
assert model.input_units_equivalencies == "test_eqiv"
assert model.input_units_allow_dimensionless == {"x": True}
assert model.name == "test"
assert model._input_units == {"x": u.m}
# Default values
model = UnitsMapping(((u.K, None),))
assert model._mapping == ((u.K, None),)
assert model._input_units_strict == {"x": True}
assert model.input_units_equivalencies is None
assert model.input_units_allow_dimensionless == {"x": False}
assert model.name is None
assert model._input_units == {"x": u.K}
# Error
MESSAGE = r"If one return unit is None, then all must be None"
with pytest.raises(ValueError, match=MESSAGE):
UnitsMapping(((u.m, None), (u.m, u.K)))
def test_evaluate(self):
model = UnitsMapping(((u.m, None),))
assert model(10 * u.m) == 10
model = UnitsMapping(((u.m, u.K),))
assert model(10 * u.m) == 10 * u.K
model = UnitsMapping(
((u.m, None), (u.K, None)),
)
assert model(10 * u.m, 20 * u.K) == (10, 20)
model = UnitsMapping(
((u.m, u.K), (u.K, u.m)),
)
assert model(10 * u.m, 20 * u.K) == (10 * u.K, 20 * u.m)
def test_repr(self):
model = UnitsMapping(((u.m, None),), name="foo")
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),), name='foo')>"
model = UnitsMapping(((u.m, None),))
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),))>"
| TestUnitsMapping |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/buffer.py | {
"start": 1349,
"end": 1523
} | class ____(Enum):
"The validation state of a buffer. This is set after the validation."
VALID = "VALID"
INVALID = "INVALID"
UNKNOWN = "UNKNOWN"
| ValidationState |
python | Farama-Foundation__Gymnasium | gymnasium/envs/toy_text/cliffwalking.py | {
"start": 406,
"end": 13343
} | class ____(Env):
"""
Cliff walking involves crossing a gridworld from start to goal while avoiding falling off a cliff.
## Description
The game starts with the player at location [3, 0] of the 4x12 grid world with the
goal located at [3, 11]. If the player reaches the goal the episode ends.
A cliff runs along [3, 1..10]. If the player moves to a cliff location it
returns to the start location.
The player makes moves until they reach the goal.
Adapted from Example 6.6 (page 132) from Reinforcement Learning: An Introduction
by Sutton and Barto [<a href="#cliffwalk_ref">1</a>].
The cliff can be chosen to be slippery (disabled by default) so the player may move perpendicular
to the intended direction sometimes (see <a href="#is_slippy">`is_slippery`</a>).
With inspiration from:
[https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py](https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py)
## Action Space
The action shape is `(1,)` in the range `{0, 3}` indicating
which direction to move the player.
- 0: Move up
- 1: Move right
- 2: Move down
- 3: Move left
## Observation Space
There are 3 x 12 + 1 possible states. The player cannot be at the cliff, nor at
the goal as the latter results in the end of the episode. What remains are all
the positions of the first 3 rows plus the bottom-left cell.
The observation is a value representing the player's current position as
current_row * ncols + current_col (where both the row and col start at 0).
For example, the starting position can be calculated as follows: 3 * 12 + 0 = 36.
The observation is returned as an `int()`.
## Starting State
The episode starts with the player in state `[36]` (location [3, 0]).
## Reward
Each time step incurs -1 reward, unless the player stepped into the cliff,
which incurs -100 reward.
## Episode End
The episode terminates when the player enters state `[47]` (location [3, 11]).
## Information
`step()` and `reset()` return a dict with the following keys:
- "p" - transition proability for the state.
As cliff walking is not stochastic, the transition probability returned always 1.0.
## Arguments
```python
import gymnasium as gym
gym.make('CliffWalking-v1')
```
## References
<a id="cliffwalk_ref"></a>[1] R. Sutton and A. Barto, “Reinforcement Learning:
An Introduction” 2020. [Online]. Available: [http://www.incompleteideas.net/book/RLbook2020.pdf](http://www.incompleteideas.net/book/RLbook2020.pdf)
## Version History
- v1: Add slippery version of cliffwalking
- v0: Initial version release
"""
metadata = {
"render_modes": ["human", "rgb_array", "ansi"],
"render_fps": 4,
}
def __init__(self, render_mode: str | None = None, is_slippery: bool = False):
self.shape = (4, 12)
self.start_state_index = np.ravel_multi_index((3, 0), self.shape)
self.nS = np.prod(self.shape)
self.nA = 4
self.is_slippery = is_slippery
# Cliff Location
self._cliff = np.zeros(self.shape, dtype=bool)
self._cliff[3, 1:-1] = True
# Calculate transition probabilities and rewards
self.P = {}
for s in range(self.nS):
position = np.unravel_index(s, self.shape)
self.P[s] = {a: [] for a in range(self.nA)}
self.P[s][UP] = self._calculate_transition_prob(position, UP)
self.P[s][RIGHT] = self._calculate_transition_prob(position, RIGHT)
self.P[s][DOWN] = self._calculate_transition_prob(position, DOWN)
self.P[s][LEFT] = self._calculate_transition_prob(position, LEFT)
# Calculate initial state distribution
# We always start in state (3, 0)
self.initial_state_distrib = np.zeros(self.nS)
self.initial_state_distrib[self.start_state_index] = 1.0
self.observation_space = spaces.Discrete(self.nS)
self.action_space = spaces.Discrete(self.nA)
self.render_mode = render_mode
# pygame utils
self.cell_size = (60, 60)
self.window_size = (
self.shape[1] * self.cell_size[1],
self.shape[0] * self.cell_size[0],
)
self.window_surface = None
self.clock = None
self.elf_images = None
self.start_img = None
self.goal_img = None
self.cliff_img = None
self.mountain_bg_img = None
self.near_cliff_img = None
self.tree_img = None
def _limit_coordinates(self, coord: np.ndarray) -> np.ndarray:
"""Prevent the agent from falling out of the grid world."""
coord[0] = min(coord[0], self.shape[0] - 1)
coord[0] = max(coord[0], 0)
coord[1] = min(coord[1], self.shape[1] - 1)
coord[1] = max(coord[1], 0)
return coord
def _calculate_transition_prob(
self, current: list[int] | np.ndarray, move: int
) -> list[tuple[float, Any, int, bool]]:
"""Determine the outcome for an action. Transition Prob is always 1.0.
Args:
current: Current position on the grid as (row, col)
delta: Change in position for transition
Returns:
Tuple of ``(transition_probability, new_state, reward, terminated)``
where `transition_probability` is 1 if the environment is not slippery, otherwise 1/3 for `move`
and the perpendicular moves.
"""
if not self.is_slippery:
deltas = [POSITION_MAPPING[move]]
else:
deltas = [
POSITION_MAPPING[act] for act in [(move - 1) % 4, move, (move + 1) % 4]
]
outcomes = []
for delta in deltas:
new_position = np.array(current) + np.array(delta)
new_position = self._limit_coordinates(new_position).astype(int)
new_state = np.ravel_multi_index(tuple(new_position), self.shape)
if self._cliff[tuple(new_position)]:
outcomes.append((1 / len(deltas), self.start_state_index, -100, False))
else:
terminal_state = (self.shape[0] - 1, self.shape[1] - 1)
is_terminated = tuple(new_position) == terminal_state
outcomes.append((1 / len(deltas), new_state, -1, is_terminated))
return outcomes
def step(self, a):
transitions = self.P[self.s][a]
i = categorical_sample([t[0] for t in transitions], self.np_random)
p, s, r, t = transitions[i]
self.s = s
self.lastaction = a
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return int(s), r, t, False, {"prob": p}
def reset(self, *, seed: int | None = None, options: dict | None = None):
super().reset(seed=seed)
self.s = categorical_sample(self.initial_state_distrib, self.np_random)
self.lastaction = None
if self.render_mode == "human":
self.render()
return int(self.s), {"prob": 1}
def render(self):
if self.render_mode is None:
assert self.spec is not None
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
)
return
if self.render_mode == "ansi":
return self._render_text()
else:
return self._render_gui(self.render_mode)
def _render_gui(self, mode):
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[toy-text]"`'
) from e
if self.window_surface is None:
pygame.init()
if mode == "human":
pygame.display.init()
pygame.display.set_caption("CliffWalking")
self.window_surface = pygame.display.set_mode(self.window_size)
else: # rgb_array
self.window_surface = pygame.Surface(self.window_size)
if self.clock is None:
self.clock = pygame.time.Clock()
if self.elf_images is None:
hikers = [
path.join(path.dirname(__file__), "img/elf_up.png"),
path.join(path.dirname(__file__), "img/elf_right.png"),
path.join(path.dirname(__file__), "img/elf_down.png"),
path.join(path.dirname(__file__), "img/elf_left.png"),
]
self.elf_images = [
pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
for f_name in hikers
]
if self.start_img is None:
file_name = path.join(path.dirname(__file__), "img/stool.png")
self.start_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.goal_img is None:
file_name = path.join(path.dirname(__file__), "img/cookie.png")
self.goal_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.mountain_bg_img is None:
bg_imgs = [
path.join(path.dirname(__file__), "img/mountain_bg1.png"),
path.join(path.dirname(__file__), "img/mountain_bg2.png"),
]
self.mountain_bg_img = [
pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
for f_name in bg_imgs
]
if self.near_cliff_img is None:
near_cliff_imgs = [
path.join(path.dirname(__file__), "img/mountain_near-cliff1.png"),
path.join(path.dirname(__file__), "img/mountain_near-cliff2.png"),
]
self.near_cliff_img = [
pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
for f_name in near_cliff_imgs
]
if self.cliff_img is None:
file_name = path.join(path.dirname(__file__), "img/mountain_cliff.png")
self.cliff_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
for s in range(self.nS):
row, col = np.unravel_index(s, self.shape)
pos = (col * self.cell_size[0], row * self.cell_size[1])
check_board_mask = row % 2 ^ col % 2
self.window_surface.blit(self.mountain_bg_img[check_board_mask], pos)
if self._cliff[row, col]:
self.window_surface.blit(self.cliff_img, pos)
if row < self.shape[0] - 1 and self._cliff[row + 1, col]:
self.window_surface.blit(self.near_cliff_img[check_board_mask], pos)
if s == self.start_state_index:
self.window_surface.blit(self.start_img, pos)
if s == self.nS - 1:
self.window_surface.blit(self.goal_img, pos)
if s == self.s:
elf_pos = (pos[0], pos[1] - 0.1 * self.cell_size[1])
last_action = self.lastaction if self.lastaction is not None else 2
self.window_surface.blit(self.elf_images[last_action], elf_pos)
if mode == "human":
pygame.event.pump()
pygame.display.update()
self.clock.tick(self.metadata["render_fps"])
else: # rgb_array
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.window_surface)), axes=(1, 0, 2)
)
def _render_text(self):
outfile = StringIO()
for s in range(self.nS):
position = np.unravel_index(s, self.shape)
if self.s == s:
output = " x "
# Print terminal state
elif position == (3, 11):
output = " T "
elif self._cliff[position]:
output = " C "
else:
output = " o "
if position[1] == 0:
output = output.lstrip()
if position[1] == self.shape[1] - 1:
output = output.rstrip()
output += "\n"
outfile.write(output)
outfile.write("\n")
with closing(outfile):
return outfile.getvalue()
def close(self):
if self.window_surface is not None:
import pygame
pygame.display.quit()
pygame.quit()
# Elf and stool from https://franuka.itch.io/rpg-snow-tileset
# All other assets by ____
| CliffWalkingEnv |
python | pytorch__pytorch | torch/distributed/checkpoint/staging.py | {
"start": 13956,
"end": 19299
} | class ____(AsyncStager):
"""
An AsyncStager implementation that replicates state_dict across training ranks
using PGTransport.
Args:
pg: ProcessGroup for distributed communication
timeout: Timeout for communication operations
device: Device to use for tensor operations
storage_dir: Directory to store persisted state_dicts
Warning: This is experimental and subject to change.
"""
_synchronize_after_execute: bool = False
def __init__(
self,
pg: ProcessGroup,
timeout: timedelta = timedelta(minutes=30),
device: torch.device = torch.device("cpu"),
storage_dir: Optional[str] = None,
):
self._pg = pg
self._timeout = timeout
# pyrefly: ignore [read-only]
self._device = device
self._transport = PGTransport(pg, timeout, device, None)
# Set up storage directory for persisting exchanged state_dicts
if storage_dir is None:
self._storage_dir = tempfile.mkdtemp(prefix="replication_stager_")
else:
self._storage_dir = storage_dir
os.makedirs(self._storage_dir, exist_ok=True)
def stage(
self, state_dict: STATE_DICT_TYPE
) -> Union[Future[STATE_DICT_TYPE], STATE_DICT_TYPE]:
"""
Stage the state_dict by replicating it across ranks. Returns a state_dict representing
the received replica.
Perform the actual replication logic. Creates bidirectional pairs where each rank exchanges
state_dict with its partner at (rank + world_size//2) % world_size.
Uses simple rank-based ordering to prevent deadlocks.
Assumes world_size is always even.
"""
if not dist.is_initialized():
return state_dict
world_size = dist.get_world_size()
current_rank = dist.get_rank()
# Calculate partner rank using half-world offset
# creates bidirectional pairs for replication.
offset = world_size // 2
partner_rank = (current_rank + offset) % world_size
# Use simple rank-based ordering to prevent deadlocks.
# Lower-numbered rank sends first, higher-numbered rank receives first.
if current_rank < partner_rank:
# Send first, then receive
self._transport.send_checkpoint([partner_rank], state_dict)
received_state_dict = self._transport.recv_checkpoint(partner_rank)
else:
# Receive first, then send
received_state_dict = self._transport.recv_checkpoint(partner_rank)
self._transport.send_checkpoint([partner_rank], state_dict)
# Persist the received state_dict for future discoverability
received_state_dict = cast(STATE_DICT_TYPE, received_state_dict)
self._persist_state_dict(received_state_dict, current_rank, partner_rank)
return received_state_dict
def _persist_state_dict(
self, state_dict: STATE_DICT_TYPE, current_rank: int, partner_rank: int
) -> None:
"""
Persist the received state_dict to disk for future discoverability.
Only keeps one replica per rank, overwriting any previous replica.
Uses atomic write pattern (temp file + rename).
Args:
state_dict: The state_dict received from partner rank
current_rank: Current rank that received the state_dict
partner_rank: Rank that sent the state_dict
"""
final_path = self._get_persisted_path(current_rank, partner_rank)
temp_path = final_path + ".tmp"
try:
# Ensure parent directory exists and is writable
os.makedirs(os.path.dirname(final_path), exist_ok=True)
# Write to temporary file with explicit flushing
with open(temp_path, "wb") as f:
torch.save(state_dict, f)
# Flush application buffers to OS buffers
f.flush()
# Force OS buffers to disk for durability
os.fsync(f.fileno())
# Atomic rename to final location
os.rename(temp_path, final_path)
except Exception as e:
# Clean up temp file if it exists
try:
if os.path.exists(temp_path):
os.remove(temp_path)
except Exception:
pass # Ignore cleanup errors
# Re-raise the original exception with more context
raise RuntimeError(
f"Failed to persist state_dict from rank {partner_rank} to rank {current_rank}: {e}"
) from e
def _get_persisted_path(self, current_rank: int, partner_rank: int) -> str:
"""
Get the file path where a state_dict would be persisted.
Args:
current_rank: Current rank
Returns:
File path for the persisted state_dict
"""
filename = f"rank_{current_rank}_replica_partner_{partner_rank}.pt"
return os.path.join(self._storage_dir, filename)
def synchronize_staging(self) -> None:
"""
No-op function, since staging is blocking.
"""
def close(self) -> None:
"""
Clean up resources. Persisted files are intentionally left for future discovery.
"""
| _ReplicationStager |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 28970,
"end": 29771
} | class ____(util.EnsureKWArg):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r"visit_\w+"
def __init__(self, dialect: Dialect):
self.dialect = dialect
def process(self, type_: TypeEngine[Any], **kw: Any) -> str:
if (
type_._variant_mapping
and self.dialect.name in type_._variant_mapping
):
type_ = type_._variant_mapping[self.dialect.name]
return type_._compiler_dispatch(self, **kw)
def visit_unsupported_compilation(
self, element: Any, err: Exception, **kw: Any
) -> NoReturn:
raise exc.UnsupportedCompilationError(self, element) from err
# this was a Visitable, but to allow accurate detection of
# column elements this is actually a column element
| TypeCompiler |
python | django__django | tests/test_runner/tests.py | {
"start": 18825,
"end": 20297
} | class ____(AdminScriptTestCase):
"""
Custom runners can add command line arguments. The runner is specified
through a settings file.
"""
def setUp(self):
super().setUp()
settings = {
"TEST_RUNNER": "'test_runner.runner.CustomOptionsTestRunner'",
}
self.write_settings("settings.py", sdict=settings)
def test_default_options(self):
args = ["test", "--settings=test_project.settings"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:2:3")
def test_default_and_given_options(self):
args = ["test", "--settings=test_project.settings", "--option_b=foo"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:foo:3")
def test_option_name_and_value_separated(self):
args = ["test", "--settings=test_project.settings", "--option_b", "foo"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:foo:3")
def test_all_options_given(self):
args = [
"test",
"--settings=test_project.settings",
"--option_a=bar",
"--option_b=foo",
"--option_c=31337",
]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "bar:foo:31337")
| CustomTestRunnerOptionsSettingsTests |
python | PrefectHQ__prefect | src/prefect/server/schemas/internal.py | {
"start": 234,
"end": 356
} | class ____(actions.WorkPoolUpdate):
status: Optional[statuses.WorkPoolStatus] = Field(default=None)
| InternalWorkPoolUpdate |
python | doocs__leetcode | solution/2300-2399/2373.Largest Local Values in a Matrix/Solution.py | {
"start": 0,
"end": 377
} | class ____:
def largestLocal(self, grid: List[List[int]]) -> List[List[int]]:
n = len(grid)
ans = [[0] * (n - 2) for _ in range(n - 2)]
for i in range(n - 2):
for j in range(n - 2):
ans[i][j] = max(
grid[x][y] for x in range(i, i + 3) for y in range(j, j + 3)
)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 3755,
"end": 4936
} | class ____(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[DepthAnythingConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
return hidden_state + residual
| DepthAnythingPreActResidualLayer |
python | getsentry__sentry | src/sentry/seer/similarity/utils.py | {
"start": 2214,
"end": 3338
} | class ____:
"""
Lazy-loaded singleton for the tokenizer to avoid expensive initialization at module load time.
"""
def __init__(self) -> None:
self._tokenizer: Tokenizer | None = None
self._lock = threading.RLock()
def get_tokenizer(self) -> Tokenizer:
"""Get the tokenizer instance, initializing it lazily if needed."""
if self._tokenizer is None:
with self._lock:
# Double-check pattern to avoid race conditions
if self._tokenizer is None:
# Try to load from local model first, fallback to remote
if os.path.exists(TOKENIZER_MODEL_PATH):
logger.info("Loading tokenizer from local model: %s", TOKENIZER_MODEL_PATH)
self._tokenizer = Tokenizer.from_file(TOKENIZER_MODEL_PATH)
else:
raise ValueError("Tokenizer model not found")
return self._tokenizer
tokenizerWrapper = TokenizerWrapper()
def get_tokenizer() -> Tokenizer:
return tokenizerWrapper.get_tokenizer()
| TokenizerWrapper |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType27.py | {
"start": 157,
"end": 356
} | class ____: ...
T = TypeVar("T", bound=ClassA)
def func1(cls: type[T]) -> list[type[T]]:
result = [cls]
for c in cls.__subclasses__():
result.extend(func1(c))
return result
| ClassA |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/todo.py | {
"start": 905,
"end": 6320
} | class ____(AgentState):
"""State schema for the todo middleware."""
todos: Annotated[NotRequired[list[Todo]], OmitFromInput]
"""List of todo items for tracking task progress."""
WRITE_TODOS_TOOL_DESCRIPTION = """Use this tool to create and manage a structured task list for your current work session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.
Only use this tool if you think it will be helpful in staying organized. If the user's request is trivial and takes less than 3 steps, it is better to NOT use this tool and just do the task directly.
## When to Use This Tool
Use this tool in these scenarios:
1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions
2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations
3. User explicitly requests todo list - When the user directly asks you to use the todo list
4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)
5. The plan may need future revisions or updates based on results from the first few steps
## How to Use This Tool
1. When you start working on a task - Mark it as in_progress BEFORE beginning work.
2. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation.
3. You can also update future tasks, such as deleting them if they are no longer necessary, or adding new tasks that are necessary. Don't change previously completed tasks.
4. You can make several updates to the todo list at once. For example, when you complete a task, you can mark the next task you need to start as in_progress.
## When NOT to Use This Tool
It is important to skip using this tool when:
1. There is only a single, straightforward task
2. The task is trivial and tracking it provides no benefit
3. The task can be completed in less than 3 trivial steps
4. The task is purely conversational or informational
## Task States and Management
1. **Task States**: Use these states to track progress:
- pending: Task not yet started
- in_progress: Currently working on (you can have multiple tasks in_progress at a time if they are not related to each other and can be run in parallel)
- completed: Task finished successfully
2. **Task Management**:
- Update task status in real-time as you work
- Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
- Complete current tasks before starting new ones
- Remove tasks that are no longer relevant from the list entirely
- IMPORTANT: When you write this todo list, you should mark your first task (or tasks) as in_progress immediately!.
- IMPORTANT: Unless all tasks are completed, you should always have at least one task in_progress to show the user that you are working on something.
3. **Task Completion Requirements**:
- ONLY mark a task as completed when you have FULLY accomplished it
- If you encounter errors, blockers, or cannot finish, keep the task as in_progress
- When blocked, create a new task describing what needs to be resolved
- Never mark a task as completed if:
- There are unresolved issues or errors
- Work is partial or incomplete
- You encountered blockers that prevent completion
- You couldn't find necessary resources or dependencies
- Quality standards haven't been met
4. **Task Breakdown**:
- Create specific, actionable items
- Break complex tasks into smaller, manageable steps
- Use clear, descriptive task names
Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully
Remember: If you only need to make a few tool calls to complete a task, and it is clear what you need to do, it is better to just do the task directly and NOT call this tool at all."""
WRITE_TODOS_SYSTEM_PROMPT = """## `write_todos`
You have access to the `write_todos` tool to help you manage and plan complex objectives.
Use this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.
This tool is very helpful for planning complex objectives, and for breaking down these larger complex objectives into smaller steps.
It is critical that you mark todos as completed as soon as you are done with a step. Do not batch up multiple steps before marking them as completed.
For simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.
Writing todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.
## Important To-Do List Usage Notes to Remember
- The `write_todos` tool should never be called multiple times in parallel.
- Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant."""
@tool(description=WRITE_TODOS_TOOL_DESCRIPTION)
def write_todos(todos: list[Todo], tool_call_id: Annotated[str, InjectedToolCallId]) -> Command:
"""Create and manage a structured task list for your current work session."""
return Command(
update={
"todos": todos,
"messages": [ToolMessage(f"Updated todo list to {todos}", tool_call_id=tool_call_id)],
}
)
| PlanningState |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_getitem.py | {
"start": 14437,
"end": 14844
} | class ____:
@pytest.mark.parametrize("key", [{"a", "b"}, {"a": "a"}])
def test_getitem_dict_and_set_deprecated(self, key):
# GH#42825 enforced in 2.0
df = DataFrame(
[[1, 2], [3, 4]], columns=MultiIndex.from_tuples([("a", 1), ("b", 2)])
)
with pytest.raises(TypeError, match="as an indexer is not supported"):
df[key]
| TestGetitemDeprecatedIndexers |
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 4012,
"end": 4186
} | class ____(IntEnum):
NONE = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
| Dither |
python | keras-team__keras | keras/src/wrappers/sklearn_wrapper.py | {
"start": 6443,
"end": 10743
} | class ____(ClassifierMixin, SKLBase):
"""scikit-learn compatible classifier wrapper for Keras models.
Note that there are sources of randomness in model initialization and
training. Refer to [Reproducibility in Keras Models](
https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
control randomness.
Args:
model: `Model`.
An instance of `Model`, or a callable returning such an object.
Note that if input is a `Model`, it will be cloned using
`keras.models.clone_model` before being fitted, unless
`warm_start=True`.
The `Model` instance needs to be passed as already compiled.
If callable, it must accept at least `X` and `y` as keyword
arguments. Other arguments must be accepted if passed as
`model_kwargs` by the user.
warm_start: bool, defaults to `False`.
Whether to reuse the model weights from the previous fit. If `True`,
the given model won't be cloned and the weights from the previous
fit will be reused.
model_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model`, if `model` is callable.
fit_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model.fit`. These can also be passed
directly to the `fit` method of the scikit-learn wrapper. The
values passed directly to the `fit` method take precedence over
these.
Attributes:
model_ : `Model`
The fitted model.
history_ : dict
The history of the fit, returned by `model.fit`.
classes_ : array-like, shape=(n_classes,)
The classes labels.
Example:
Here we use a function which creates a basic MLP model dynamically
choosing the input and output shapes. We will use this to create our
scikit-learn model.
``` python
from keras.layers import Dense, Input
from keras.models import Model
def dynamic_model(X, y, loss, layers=[10]):
# Creates a basic MLP model dynamically choosing the input and
# output shapes.
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = Dense(n_outputs, activation="softmax")(hidden)
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
```
You can then use this function to create a scikit-learn compatible model
and fit it on some data.
``` python
from sklearn.datasets import make_classification
from keras.wrappers import SKLearnClassifier
X, y = make_classification(n_samples=1000, n_features=10)
est = SKLearnClassifier(
model=dynamic_model,
model_kwargs={
"loss": "categorical_crossentropy",
"layers": [20, 20, 20],
},
)
est.fit(X, y, epochs=5)
```
"""
def _process_target(self, y, reset=False):
"""Classifiers do OHE."""
target_type = type_of_target(y, raise_unknown=True)
if target_type not in ["binary", "multiclass"]:
raise ValueError(
"Only binary and multiclass target types are supported."
f" Target type: {target_type}"
)
if reset:
self._target_encoder = sklearn.pipeline.make_pipeline(
TargetReshaper(),
sklearn.preprocessing.OneHotEncoder(sparse_output=False),
).fit(y)
self.classes_ = np.unique(y)
if len(self.classes_) == 1:
raise ValueError(
"Classifier can't train when only one class is present."
)
return self._target_encoder.transform(y)
def _more_tags(self):
# required to be compatible with scikit-learn<1.6
return {"poor_score": True}
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.classifier_tags.poor_score = True
return tags
@keras_export("keras.wrappers.SKLearnRegressor")
| SKLearnClassifier |
python | huggingface__transformers | src/transformers/models/nemotron/modeling_nemotron.py | {
"start": 43653,
"end": 43961
} | class ____(GenericForTokenClassification, NemotronPreTrainedModel): ...
__all__ = [
"NemotronForQuestionAnswering",
"NemotronForCausalLM",
"NemotronModel",
"NemotronPreTrainedModel",
"NemotronForSequenceClassification",
"NemotronForTokenClassification",
]
| NemotronForTokenClassification |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 2414,
"end": 2624
} | class ____(collections.namedtuple("_EndPoint", ["convertible", "index"])):
"""An endpoint in a graph."""
__slots__ = ()
def __str__(self):
return "{}[{}]".format(self.convertible, self.index)
| _EndPoint |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_storage_transfer_service.py | {
"start": 6254,
"end": 9118
} | class ____:
def test_should_do_nothing_on_empty(self):
body = {}
TransferJobPreprocessor(body=body).process_body()
assert body == {}
@pytest.mark.skipif(boto3 is None, reason="Skipping test because boto3 is not available")
@mock.patch("airflow.providers.google.cloud.operators.cloud_storage_transfer_service.AwsBaseHook")
def test_should_inject_aws_credentials(self, mock_hook):
mock_hook.return_value.get_credentials.return_value = Credentials(
TEST_AWS_ACCESS_KEY_ID, TEST_AWS_ACCESS_SECRET, None
)
body = {TRANSFER_SPEC: deepcopy(SOURCE_AWS)}
body = TransferJobPreprocessor(body=body).process_body()
assert body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE][AWS_ACCESS_KEY] == TEST_AWS_ACCESS_KEY
@mock.patch("airflow.providers.google.cloud.operators.cloud_storage_transfer_service.AwsBaseHook")
def test_should_not_inject_aws_credentials(self, mock_hook):
mock_hook.return_value.get_credentials.return_value = Credentials(
TEST_AWS_ACCESS_KEY_ID, TEST_AWS_ACCESS_SECRET, None
)
body = {TRANSFER_SPEC: deepcopy(SOURCE_AWS_ROLE_ARN)}
body = TransferJobPreprocessor(body=body).process_body()
assert AWS_ACCESS_KEY not in body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE]
@pytest.mark.parametrize("field_attr", [SCHEDULE_START_DATE, SCHEDULE_END_DATE])
def test_should_format_date_from_python_to_dict(self, field_attr):
body = {SCHEDULE: {field_attr: NATIVE_DATE}}
TransferJobPreprocessor(body=body).process_body()
assert body[SCHEDULE][field_attr] == DICT_DATE
def test_should_format_time_from_python_to_dict(self):
body = {SCHEDULE: {START_TIME_OF_DAY: NATIVE_TIME}}
TransferJobPreprocessor(body=body).process_body()
assert body[SCHEDULE][START_TIME_OF_DAY] == DICT_TIME
@pytest.mark.parametrize("field_attr", [SCHEDULE_START_DATE, SCHEDULE_END_DATE])
def test_should_not_change_date_for_dict(self, field_attr):
body = {SCHEDULE: {field_attr: DICT_DATE}}
TransferJobPreprocessor(body=body).process_body()
assert body[SCHEDULE][field_attr] == DICT_DATE
def test_should_not_change_time_for_dict(self):
body = {SCHEDULE: {START_TIME_OF_DAY: DICT_TIME}}
TransferJobPreprocessor(body=body).process_body()
assert body[SCHEDULE][START_TIME_OF_DAY] == DICT_TIME
@time_machine.travel("2018-10-15", tick=False)
def test_should_set_default_schedule(self):
body = {}
TransferJobPreprocessor(body=body, default_schedule=True).process_body()
assert body == {
SCHEDULE: {
SCHEDULE_END_DATE: {"day": 15, "month": 10, "year": 2018},
SCHEDULE_START_DATE: {"day": 15, "month": 10, "year": 2018},
}
}
| TestTransferJobPreprocessor |
python | getsentry__sentry | src/sentry/interfaces/exception.py | {
"start": 3238,
"end": 7195
} | class ____(Interface):
"""
an optional field residing in the exception interface. It carries additional
information about the way the exception was created on the target system.
This includes general exception values obtained from operating system or
runtime APIs, as well as mechanism-specific values.
>>> {
>>> "type": "mach",
>>> "description": "EXC_BAD_ACCESS",
>>> "data": {
>>> "relevant_address": "0x1"
>>> },
>>> "handled": false,
>>> "synthetic": false,
>>> "help_link": "https://developer.apple.com/library/content/qa/qa1367/_index.html",
>>> "meta": {
>>> "mach_exception": {
>>> "exception": 1,
>>> "subcode": 8,
>>> "code": 1
>>> },
>>> "signal": {
>>> "number": 11
>>> }
>>> }
>>> }
"""
@classmethod
def to_python(cls, data, **kwargs):
for key in (
"type",
"synthetic",
"description",
"help_link",
"handled",
"data",
"meta",
"source",
"is_exception_group",
"exception_id",
"parent_id",
):
data.setdefault(key, None)
return super().to_python(data, **kwargs)
def to_json(self):
return prune_empty_keys(
{
"type": self.type,
"synthetic": self.synthetic,
"description": self.description,
"help_link": self.help_link,
"handled": self.handled,
"data": self.data or None,
"meta": prune_empty_keys(self.meta) or None,
"source": self.source,
"is_exception_group": self.is_exception_group,
"exception_id": self.exception_id,
"parent_id": self.parent_id,
}
)
def iter_tags(self):
yield (self.path, self.type)
if self.handled is not None:
yield ("handled", self.handled and "yes" or "no")
def __repr__(self) -> str:
return f"{type(self).__name__} -> id:{self.exception_id}, parent_id:{self.parent_id}, source:{self.source}, type:{self.type}"
def uncontribute_non_stacktrace_variants(variants):
"""If we have multiple variants and at least one has a stacktrace, we
want to mark all non stacktrace variants non contributing. The reason
for this is that otherwise we end up in very generic grouping which has
some negative consequences for the quality of the groups.
"""
if len(variants) <= 1:
return variants
any_stacktrace_contributes = False
non_contributing_components = []
stacktrace_variants = set()
# In case any of the variants has a contributing stacktrace, we want
# to make all other variants non contributing. Thr e
for key, component in variants.items():
if any(
s.contributes for s in component.iter_subcomponents(id="stacktrace", recursive=True)
):
any_stacktrace_contributes = True
stacktrace_variants.add(key)
else:
non_contributing_components.append(component)
if any_stacktrace_contributes:
if len(stacktrace_variants) == 1:
hint_suffix = "but the %s variant does" % next(iter(stacktrace_variants))
else:
# this branch is basically dead because we only have two
# variants right now, but this is so this does not break in
# the future.
hint_suffix = "others do"
for component in non_contributing_components:
component.update(
contributes=False,
hint="ignored because this variant does not contain a "
"stacktrace, but %s" % hint_suffix,
)
return variants
| Mechanism |
python | psf__black | src/blib2to3/pytree.py | {
"start": 18335,
"end": 19863
} | class ____(BasePattern):
def __init__(
self,
type: int | None = None,
content: str | None = None,
name: str | None = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node: NL, results=None) -> bool:
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
| LeafPattern |
python | nedbat__coveragepy | tests/test_coverage.py | {
"start": 35245,
"end": 38936
} | class ____(CoverageTest):
"""Tests of the exclusion feature to mark lines as not covered."""
def test_default(self) -> None:
# A number of forms of pragma comment are accepted.
self.check_coverage(
"""\
a = 1
b = 2 # pragma: no cover
c = 3
d = 4 #pragma NOCOVER
e = 5
f = 6#\tpragma:\tno cover
g = 7
...
i = 9
... # we don't care about this line
k = 11
def foo12(): ... # do nothing
async def bar13(): ...
def method14(self) ->None: ...
def method15( # 15
self,
some_arg: str = "Hello",
): ...
def method19(self): return a[1,...]
def method20(
self,
some_args,
) -> int: ...
x = 24
def method25(
self,
): return a[1,...]
def f28(): print("(well): ... #2 false positive!")
""",
lines=[1, 3, 5, 7, 9, 11, 19, 24, 25],
)
def test_two_excludes(self) -> None:
self.check_coverage(
"""\
a = 1; b = 2
if a == 99:
a = 4 # -cc
b = 5
c = 6 # -xx
assert a == 1 and b == 2
""",
lines=[1, 3, 5, 7],
missing="5",
excludes=["-cc", "-xx"],
)
def test_excluding_elif_suites(self) -> None:
self.check_coverage(
"""\
a = 1; b = 2
if 1==1:
a = 4
b = 5
c = 6
elif 1==0: #pragma: NO COVER
a = 8
b = 9
else:
a = 11
b = 12
assert a == 4 and b == 5 and c == 6
""",
lines=[1, 3, 4, 5, 6, 11, 12, 13],
missing="11-12",
excludes=["#pragma: NO COVER"],
)
def test_excluding_try_except(self) -> None:
self.check_coverage(
"""\
a = 0
try:
a = 1
except: #pragma: NO COVER
a = 99
else:
a = 123
assert a == 123
""",
lines=[1, 2, 3, 7, 8],
missing="",
excludes=["#pragma: NO COVER"],
branchz="",
branchz_missing="",
)
def test_excluding_try_except_stranded_else(self) -> None:
self.check_coverage(
"""\
a = 0
try:
a = 1
raise Exception("foo")
except:
a = 99
else: #pragma: NO COVER
x = 2
assert a == 99
""",
lines=[1, 2, 3, 4, 5, 6, 9],
missing="",
excludes=["#pragma: NO COVER"],
branchz="",
branchz_missing="",
)
def test_excluded_comprehension_branches(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1271
self.check_coverage(
"""\
x, y = [0], [1]
if x == [2]:
raise NotImplementedError # NOCOVPLZ
if all(_ == __ for _, __ in zip(x, y)):
raise NotImplementedError # NOCOVPLZ
""",
lines=[1, 2, 4],
missing="",
excludes=["# NOCOVPLZ"],
branchz="23 24 45 4.",
branchz_missing="",
)
| ExcludeTest |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 4130,
"end": 4487
} | class ____:
demo_path: str
steps: int = 0
strength: float = 1.0
samples_per_update: int = 0
# Setting either of these to None will allow the Optimizer
# to decide these parameters, based on Trainer hyperparams
num_epoch: Optional[int] = None
batch_size: Optional[int] = None
@attr.s(auto_attribs=True)
| BehavioralCloningSettings |
python | walkccc__LeetCode | solutions/1957. Delete Characters to Make Fancy String/1957.py | {
"start": 0,
"end": 190
} | class ____:
def makeFancyString(self, s: str) -> str:
ans = []
for c in s:
if len(ans) < 2 or ans[-1] != c or ans[-2] != c:
ans.append(c)
return ''.join(ans)
| Solution |
python | Pylons__pyramid | tests/test_renderers.py | {
"start": 114,
"end": 3387
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, **kw):
from pyramid.renderers import JSON
return JSON(**kw)
def test_it(self):
renderer = self._makeOne()(None)
result = renderer({'a': 1}, {})
self.assertEqual(result, '{"a": 1}')
def test_with_request_content_type_notset(self):
request = testing.DummyRequest()
renderer = self._makeOne()(None)
renderer({'a': 1}, {'request': request})
self.assertEqual(request.response.content_type, 'application/json')
def test_with_request_content_type_set(self):
request = testing.DummyRequest()
request.response.content_type = 'text/mishmash'
renderer = self._makeOne()(None)
renderer({'a': 1}, {'request': request})
self.assertEqual(request.response.content_type, 'text/mishmash')
def test_with_custom_adapter(self):
request = testing.DummyRequest()
from datetime import datetime
def adapter(obj, req):
self.assertEqual(req, request)
return obj.isoformat()
now = datetime.utcnow()
renderer = self._makeOne()
renderer.add_adapter(datetime, adapter)
result = renderer(None)({'a': now}, {'request': request})
self.assertEqual(result, '{"a": "%s"}' % now.isoformat())
def test_with_custom_adapter2(self):
request = testing.DummyRequest()
from datetime import datetime
def adapter(obj, req):
self.assertEqual(req, request)
return obj.isoformat()
now = datetime.utcnow()
renderer = self._makeOne(adapters=((datetime, adapter),))
result = renderer(None)({'a': now}, {'request': request})
self.assertEqual(result, '{"a": "%s"}' % now.isoformat())
def test_with_custom_serializer(self):
class Serializer:
def __call__(self, obj, **kw):
self.obj = obj
self.kw = kw
return 'foo'
serializer = Serializer()
renderer = self._makeOne(serializer=serializer, baz=5)
obj = {'a': 'b'}
result = renderer(None)(obj, {})
self.assertEqual(result, 'foo')
self.assertEqual(serializer.obj, obj)
self.assertEqual(serializer.kw['baz'], 5)
self.assertTrue('default' in serializer.kw)
def test_with_object_adapter(self):
request = testing.DummyRequest()
outerself = self
class MyObject:
def __init__(self, x):
self.x = x
def __json__(self, req):
outerself.assertEqual(req, request)
return {'x': self.x}
objects = [MyObject(1), MyObject(2)]
renderer = self._makeOne()(None)
result = renderer(objects, {'request': request})
self.assertEqual(result, '[{"x": 1}, {"x": 2}]')
def test_with_object_adapter_no___json__(self):
class MyObject:
def __init__(self, x):
self.x = x
objects = [MyObject(1), MyObject(2)]
renderer = self._makeOne()(None)
self.assertRaises(TypeError, renderer, objects, {})
| TestJSON |
python | realpython__materials | python-with-statement/site_checker_v2.py | {
"start": 33,
"end": 732
} | class ____:
def __init__(self, url):
self._url = url
async def __aenter__(self):
self.session = aiohttp.ClientSession()
response = await self.session.get(self._url)
return response
async def __aexit__(self, *_):
await self.session.close()
async def main():
await asyncio.gather(
check("https://realpython.com"),
check("https://pycoders.com"),
)
async def check(url):
async with AsyncSession(url) as response:
print(f"{url}: status -> {response.status}")
html = await response.text()
print(f"{url}: type -> {html[:17].strip()}")
if __name__ == "__main__":
asyncio.run(main())
| AsyncSession |
python | google__jax | jax/experimental/jax2tf/examples/tf_js/quickdraw/quickdraw.py | {
"start": 1436,
"end": 4894
} | class ____(nn.Module):
@nn.compact
def __call__(self, x):
x = nn.Conv(features=16, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=32, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=128)(x)
x = nn.relu(x)
x = nn.Dense(features=_NUM_CLASSES.value)(x)
return x
@jax.jit
def apply_model(state, inputs, labels):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(params):
logits = state.apply_fn({'params': params}, inputs)
one_hot = jax.nn.one_hot(labels, _NUM_CLASSES.value)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(state.params)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return grads, loss, accuracy
@jax.jit
def update_model(state, grads):
return state.apply_gradients(grads=grads)
def run_epoch(state, dataset, train=True):
epoch_loss = []
epoch_accuracy = []
for inputs, labels in dataset:
grads, loss, accuracy = apply_model(state, inputs, labels)
if train:
state = update_model(state, grads)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
loss = np.mean(epoch_loss)
accuracy = np.mean(epoch_accuracy)
return state, loss, accuracy
def create_train_state(rng):
quick_draw = QuickDraw()
params = quick_draw.init(rng, jnp.ones((1, 28, 28, 1)))['params']
tx = optax.adam(learning_rate=0.001, b1=0.9, b2=0.999)
return train_state.TrainState.create(
apply_fn=quick_draw.apply, params=params, tx=tx)
def train(state, train_ds, test_ds):
for epoch in range(1, _NUM_EPOCHS.value+1):
start_time = time.time()
state, train_loss, train_accuracy = run_epoch(state, train_ds)
_, test_loss, test_accuracy = run_epoch(state, test_ds, train=False)
print(f"Training set accuracy {train_accuracy}")
print(f"Training set loss {train_loss}")
print(f"Test set accuracy {test_accuracy}")
print(f"Test set loss {test_loss}")
epoch_time = time.time() - start_time
print(f"Epoch {epoch} in {epoch_time:0.2f} sec")
return state
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
base_model_path = "/tmp/jax2tf/tf_js_quickdraw"
dataset_path = os.path.join(base_model_path, "data")
classes = input_pipeline.download_dataset(dataset_path, _NUM_CLASSES.value)
assert len(classes) == _NUM_CLASSES.value, "Incorrect number of classes"
print(f"Classes are: {classes}")
print("Loading dataset into memory...")
train_ds, test_ds = input_pipeline.get_datasets(dataset_path, classes)
print(f"Starting training for {_NUM_EPOCHS.value} epochs...")
state = create_train_state(jax.random.PRNGKey(0))
state = train(state, train_ds, test_ds)
tfjs.converters.convert_jax(
apply_fn=state.apply_fn,
params={'params': state.params},
input_signatures=[tf.TensorSpec([1, 28, 28, 1])],
model_dir=os.path.join(base_model_path, 'tfjs_models'))
if __name__ == "__main__":
app.run(main)
| QuickDraw |
python | viewflow__viewflow | viewflow/workflow/migrations/0010_viewflow20.py | {
"start": 155,
"end": 2683
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0009_merge"),
]
operations = [
migrations.RemoveField(
model_name="task",
name="comments",
),
migrations.AddField(
model_name="process",
name="parent_task",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="viewflow.task",
),
),
migrations.AlterField(
model_name="process",
name="created",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="Created"
),
),
migrations.AlterField(
model_name="process",
name="status",
field=models.CharField(
choices=[("CANCELED", "Canceled"), ("DONE", "Done"), ("NEW", "New")],
default="NEW",
max_length=50,
verbose_name="Status",
),
),
migrations.AlterField(
model_name="task",
name="created",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="Created"
),
),
migrations.AlterField(
model_name="task",
name="status",
field=models.CharField(
choices=[
("ASSIGNED", "Assigned"),
("CANCELED", "Canceled"),
("DONE", "Done"),
("ERROR", "Error"),
("NEW", "New"),
("REVIVED", "Revived"),
("SCHEDULED", "Scheduled"),
("STARTED", "Started"),
],
db_index=True,
default="NEW",
max_length=50,
verbose_name="Status",
),
),
migrations.AlterField(
model_name="process",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="task",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
]
| Migration |
python | pypa__warehouse | tests/unit/ip_addresses/test_models.py | {
"start": 273,
"end": 1667
} | class ____:
def test_repr(self, db_request):
ip_address = db_request.ip_address
assert isinstance(repr(ip_address), str)
assert repr(ip_address) == REMOTE_ADDR
def test_invalid_transformed(self, db_request):
ip_address = DBIpAddressFactory(ip_address="wutang")
assert repr(ip_address) == "192.0.2.69"
@pytest.mark.parametrize(
"kwargs",
[
{"ip_address": REMOTE_ADDR, "is_banned": True},
{
"ip_address": REMOTE_ADDR,
"is_banned": True,
"ban_reason": BanReason.AUTHENTICATION_ATTEMPTS,
},
{"ip_address": REMOTE_ADDR, "is_banned": True, "ban_date": sql.func.now()},
{
"ip_address": REMOTE_ADDR,
"is_banned": False,
"ban_reason": BanReason.AUTHENTICATION_ATTEMPTS,
},
{"ip_address": REMOTE_ADDR, "is_banned": False, "ban_date": sql.func.now()},
{
"ip_address": REMOTE_ADDR,
"is_banned": False,
"ban_reason": BanReason.AUTHENTICATION_ATTEMPTS,
"ban_date": sql.func.now(),
},
],
)
def test_ban_data_constraint(self, db_request, kwargs):
with pytest.raises(psycopg.errors.CheckViolation):
DBIpAddressFactory(**kwargs)
| TestIpAddress |
python | walkccc__LeetCode | solutions/2151. Maximum Good People Based on Statements/2151.py | {
"start": 0,
"end": 838
} | class ____:
def maximumGood(self, statements: list[list[int]]) -> int:
n = len(statements)
ans = 0
def isValid(good: list[int]) -> bool:
for i, g in enumerate(good):
if not g: # The i-th person is bad, so no need to check.
continue
for j in range(n):
if statements[i][j] == 2:
continue
if statements[i][j] != good[j]:
return False
return True
def dfs(good: list[int], i: int, count: int) -> None:
nonlocal ans
if i == n:
if isValid(good):
ans = max(ans, count)
return
good.append(0) # Assume the i-th person is bad.
dfs(good, i + 1, count)
good[-1] = 1 # Assume the i-th person is good.
dfs(good, i + 1, count + 1)
good.pop()
dfs([], 0, 0)
return ans
| Solution |
python | django-extensions__django-extensions | django_extensions/management/commands/update_permissions.py | {
"start": 411,
"end": 2953
} | class ____(BaseCommand):
help = (
"reloads permissions for specified apps, or all apps if no args are specified"
)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--apps",
dest="apps",
help="Reload permissions only for apps (comma separated)",
)
parser.add_argument(
"--create-only",
action="store_true",
default=False,
help="Only create missing permissions",
)
parser.add_argument(
"--update-only",
action="store_true",
default=False,
help="Only update permissions",
)
@signalcommand
def handle(self, *args, **options):
if options["apps"]:
app_names = options["apps"].split(",")
apps = [django_apps.get_app_config(x) for x in app_names]
else:
apps = django_apps.get_app_configs()
if options["create_only"]:
do_create, do_update = True, False
elif options["update_only"]:
do_create, do_update = False, True
else:
do_create, do_update = True, True
for app in apps:
if do_create:
# create permissions if they do not exist
create_permissions(app, options["verbosity"])
if do_update:
# update permission name's if changed
for model in app.get_models():
content_type = ContentType.objects.get_for_model(model)
for codename, name in _get_all_permissions(model._meta):
try:
permission = Permission.objects.get(
codename=codename, content_type=content_type
)
except Permission.DoesNotExist:
continue
if permission.name != name:
old_str = str(permission)
permission.name = name
if options["verbosity"] >= 2:
self.stdout.write(
self.style.SUCCESS(
"Update permission '%s' to '%s'"
% (old_str, permission)
)
)
permission.save()
| Command |
python | pennersr__django-allauth | allauth/socialaccount/providers/openstreetmap/provider.py | {
"start": 239,
"end": 841
} | class ____(ProviderAccount):
def get_profile_url(self):
return (
"https://www.openstreetmap.org/user/"
+ self.account.extra_data["display_name"]
)
def get_avatar_url(self):
ret = None
if img := self.account.extra_data.get("img"):
ret = img.get("href")
if not ret:
# Backwards compatible (OSM provider data originating from XML)
ret = self.account.extra_data.get("avatar")
return ret
def get_username(self):
return self.account.extra_data["display_name"]
| OpenStreetMapAccount |
python | sqlalchemy__sqlalchemy | test/orm/test_core_compilation.py | {
"start": 44762,
"end": 61744
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = None
@testing.fixture
def query_expression_fixture(self):
users, User = (
self.tables.users,
self.classes.User,
)
addresses, Address = (self.tables.addresses, self.classes.Address)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("value", query_expression()),
(
"value_w_default",
query_expression(default_expr=literal(15)),
),
]
),
)
self.mapper_registry.map_imperatively(Address, addresses)
return User
@testing.fixture
def deferred_fixture(self):
User = self.classes.User
users = self.tables.users
self.mapper_registry.map_imperatively(
User,
users,
properties={
"name": deferred(users.c.name),
"name_upper": column_property(
func.upper(users.c.name), deferred=True
),
},
)
return User
@testing.fixture
def query_expression_w_joinedload_fixture(self):
users, User = (
self.tables.users,
self.classes.User,
)
addresses, Address = (self.tables.addresses, self.classes.Address)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("value", query_expression()),
(
"addresses",
relationship(
Address,
primaryjoin=and_(
addresses.c.user_id == users.c.id,
addresses.c.email_address != None,
),
),
),
]
),
)
self.mapper_registry.map_imperatively(Address, addresses)
return User
@testing.fixture
def column_property_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("concat", column_property(users.c.id * 2)),
(
"count",
column_property(
select(func.count(addresses.c.id))
.where(
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
),
]
),
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
)
},
)
return User, Address
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses")
},
)
return User, Address
@testing.fixture
def hard_labeled_self_ref_fixture(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
data = Column(String)
data_lower = column_property(func.lower(data).label("hardcoded"))
as_ = relationship("A")
return A
def test_no_joinedload_embedded(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
subq = stmt.subquery()
s2 = select(subq)
self.assert_compile(
s2,
"SELECT anon_1.id, anon_1.user_id, anon_1.email_address "
"FROM (SELECT addresses.id AS id, addresses.user_id AS "
"user_id, addresses.email_address AS email_address "
"FROM addresses) AS anon_1",
)
def test_with_expr_one(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User).options(
with_expression(User.value, User.name + "foo")
)
self.assert_compile(
stmt,
"SELECT users.name || :name_1 AS anon_1, :param_1 AS anon_2, "
"users.id, "
"users.name FROM users",
)
def test_exported_columns_query_expression(self, query_expression_fixture):
"""test behaviors related to #8881"""
User = query_expression_fixture
stmt = select(User)
eq_(
stmt.selected_columns.keys(),
["value_w_default", "id", "name"],
)
stmt = select(User).options(
with_expression(User.value, User.name + "foo")
)
# bigger problem. we still don't include 'value', because we dont
# run query options here. not "correct", but is at least consistent
# with deferred
eq_(
stmt.selected_columns.keys(),
["value_w_default", "id", "name"],
)
def test_exported_columns_colprop(self, column_property_fixture):
"""test behaviors related to #8881"""
User, _ = column_property_fixture
stmt = select(User)
# we get all the cols because they are not deferred and have a value
eq_(
stmt.selected_columns.keys(),
["concat", "count", "id", "name"],
)
def test_exported_columns_deferred(self, deferred_fixture):
"""test behaviors related to #8881"""
User = deferred_fixture
stmt = select(User)
# don't include 'name_upper' as it's deferred and readonly.
# "name" however is a column on the table, so even though it is
# deferred, it gets special treatment (related to #6661)
eq_(
stmt.selected_columns.keys(),
["id", "name"],
)
stmt = select(User).options(
undefer(User.name), undefer(User.name_upper)
)
# undefer doesn't affect the readonly col because we dont look
# at options when we do selected_columns
eq_(
stmt.selected_columns.keys(),
["id", "name"],
)
def test_with_expr_two(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User.id, User.name, (User.name + "foo").label("foo"))
subq = stmt.subquery()
u1 = aliased(User, subq)
stmt = select(u1).options(with_expression(u1.value, subq.c.foo))
self.assert_compile(
stmt,
"SELECT anon_1.foo, :param_1 AS anon_2, anon_1.id, "
"anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name, "
"users.name || :name_1 AS foo FROM users) AS anon_1",
)
def test_with_expr_three(self, query_expression_w_joinedload_fixture):
"""test :ticket:`6259`"""
User = query_expression_w_joinedload_fixture
stmt = select(User).options(joinedload(User.addresses)).limit(1)
# test that the outer IS NULL is rendered
# test that the inner query does not include a NULL default
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT users.id AS id, users.name AS name FROM users "
"LIMIT :param_1) AS anon_1 LEFT OUTER "
"JOIN addresses AS addresses_1 ON addresses_1.user_id = anon_1.id "
"AND addresses_1.email_address IS NOT NULL",
)
def test_with_expr_four(self, query_expression_w_joinedload_fixture):
"""test :ticket:`6259`"""
User = query_expression_w_joinedload_fixture
stmt = (
select(User)
.options(
with_expression(User.value, null()), joinedload(User.addresses)
)
.limit(1)
)
# test that the outer IS NULL is rendered, not adapted
# test that the inner query includes the NULL we asked for
# ironically, this statement would not actually fetch due to the NULL
# not allowing adaption and therefore failing on the result set
# matching, this was addressed in #7154.
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.id, anon_2.name, "
"addresses_1.id AS id_1, addresses_1.user_id, "
"addresses_1.email_address FROM (SELECT NULL AS anon_1, "
"users.id AS id, users.name AS name FROM users LIMIT :param_1) "
"AS anon_2 LEFT OUTER JOIN addresses AS addresses_1 "
"ON addresses_1.user_id = anon_2.id "
"AND addresses_1.email_address IS NOT NULL",
)
def test_joinedload_outermost(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
# render joined eager loads with stringify
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, addresses.email_address, "
"users_1.id AS id_1, users_1.name FROM addresses "
"LEFT OUTER JOIN users AS users_1 "
"ON users_1.id = addresses.user_id",
)
def test_joinedload_outermost_w_wrapping_elements(self, plain_fixture):
User, Address = plain_fixture
stmt = (
select(User)
.options(joinedload(User.addresses))
.limit(10)
.distinct()
)
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT DISTINCT users.id AS id, users.name AS name FROM users "
"LIMIT :param_1) "
"AS anon_1 LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.id = addresses_1.user_id",
)
def test_contains_eager_outermost_w_wrapping_elements(self, plain_fixture):
"""test #8569"""
User, Address = plain_fixture
stmt = (
select(User)
.join(User.addresses)
.options(contains_eager(User.addresses))
.limit(10)
.distinct()
)
self.assert_compile(
stmt,
"SELECT DISTINCT addresses.id, addresses.user_id, "
"addresses.email_address, users.id AS id_1, users.name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"LIMIT :param_1",
)
def test_joinedload_hard_labeled_selfref(
self, hard_labeled_self_ref_fixture
):
"""test #8569"""
A = hard_labeled_self_ref_fixture
stmt = select(A).options(joinedload(A.as_)).distinct()
self.assert_compile(
stmt,
"SELECT anon_1.hardcoded, anon_1.id, anon_1.a_id, anon_1.data, "
"lower(a_1.data) AS lower_1, a_1.id AS id_1, a_1.a_id AS a_id_1, "
"a_1.data AS data_1 FROM (SELECT DISTINCT lower(a.data) AS "
"hardcoded, a.id AS id, a.a_id AS a_id, a.data AS data FROM a) "
"AS anon_1 LEFT OUTER JOIN a AS a_1 ON anon_1.id = a_1.a_id",
)
def test_contains_eager_hard_labeled_selfref(
self, hard_labeled_self_ref_fixture
):
"""test #8569"""
A = hard_labeled_self_ref_fixture
a1 = aliased(A)
stmt = (
select(A)
.join(A.as_.of_type(a1))
.options(contains_eager(A.as_.of_type(a1)))
.distinct()
)
self.assert_compile(
stmt,
"SELECT DISTINCT lower(a.data) AS hardcoded, "
"lower(a_1.data) AS hardcoded, a_1.id, a_1.a_id, a_1.data, "
"a.id AS id_1, a.a_id AS a_id_1, a.data AS data_1 "
"FROM a JOIN a AS a_1 ON a.id = a_1.a_id",
)
def test_column_properties(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
self.assert_compile(
stmt,
"SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_2, users.id, "
"users.name FROM users",
checkparams={"id_1": 2},
)
def test_column_properties_can_we_use(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
# User, Address = column_property_fixture
# stmt = select(User)
# TODO: shouldn't we be able to get at count ?
# stmt = stmt.where(stmt.selected_columns.count > 5)
# self.assert_compile(stmt, "")
def test_column_properties_subquery(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(stmt.subquery())
# TODO: shouldn't we be able to get to stmt.subquery().c.count ?
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
"FROM (SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_3, users.id AS id, "
"users.name AS name FROM users) AS anon_2",
checkparams={"id_1": 2},
)
def test_column_properties_subquery_two(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
# col properties will retain anonymous labels, however will
# adopt the .key within the subquery collection so they can
# be addressed.
stmt = select(
User.id,
User.name,
User.concat,
User.count,
)
subq = stmt.subquery()
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(subq).where(subq.c.concat == "foo")
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, anon_1.anon_2, anon_1.anon_3 "
"FROM (SELECT users.id AS id, users.name AS name, "
"users.id * :id_1 AS anon_2, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users.id = addresses.user_id) AS anon_3 "
"FROM users) AS anon_1 WHERE anon_1.anon_2 = :param_1",
checkparams={"id_1": 2, "param_1": "foo"},
)
def test_column_properties_aliased_subquery(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
u1 = aliased(User)
stmt = select(u1)
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(stmt.subquery())
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
"FROM (SELECT users_1.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users_1.id = addresses.user_id) AS anon_3, "
"users_1.id AS id, users_1.name AS name "
"FROM users AS users_1) AS anon_2",
checkparams={"id_1": 2},
)
| ExtraColsTest |
python | doocs__leetcode | solution/0100-0199/0149.Max Points on a Line/Solution2.py | {
"start": 0,
"end": 549
} | class ____:
def maxPoints(self, points: List[List[int]]) -> int:
def gcd(a, b):
return a if b == 0 else gcd(b, a % b)
n = len(points)
ans = 1
for i in range(n):
x1, y1 = points[i]
cnt = Counter()
for j in range(i + 1, n):
x2, y2 = points[j]
dx, dy = x2 - x1, y2 - y1
g = gcd(dx, dy)
k = (dx // g, dy // g)
cnt[k] += 1
ans = max(ans, cnt[k] + 1)
return ans
| Solution |
python | jazzband__django-oauth-toolkit | tests/test_scopes.py | {
"start": 2397,
"end": 4786
} | class ____(BaseTest):
def test_scopes_saved_in_grant(self):
"""
Test scopes are properly saved in grant
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope1 scope2",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
grant = Grant.objects.get(code=authorization_code)
self.assertEqual(grant.scope, "scope1 scope2")
def test_scopes_save_in_access_token(self):
"""
Test scopes are properly saved in access token
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope1 scope2",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
at = AccessToken.objects.get(token=access_token)
self.assertEqual(at.scope, "scope1 scope2")
| TestScopesSave |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 142455,
"end": 165832
} | class ____:
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_allclose(it0, array([0.19933433254006822, -0.34570883800412566]),
atol=1.5e-8, rtol=0)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_allclose(it2, array([0.0049937546274601858, -0.43423067011231614]),
atol=1.5e-8, rtol=0)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_allclose(oz, ozr, atol=1.5e-8, rtol=0)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_allclose(o1, o1r, atol=1.5e-8, rtol=0)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_allclose(jnnr, 0.099500832639235995, atol=1.5e-8, rtol=0)
def test_negv_jv(self):
assert_allclose(special.jv(-3, 2), -special.jv(3, 2), atol=1.5e-14, rtol=0)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_allclose(yc, y, atol=1.5e-8, rtol=0, err_msg=f'test #{i}')
def test_negv_jve(self):
assert_allclose(special.jve(-3, 2), -special.jve(3, 2),
atol=1.5e-14, rtol=0)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_allclose(jvexp, 0.099500832639235995, atol=1.5e-8, rtol=0)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_allclose(jvexp1, jvexpr, atol=1.5e-8, rtol=0)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_allclose(jn0, array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),
atol=1.5e-4, rtol=0)
assert_allclose(jn1, array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
atol=1.5e-4, rtol=0)
jn102 = special.jn_zeros(102,5)
assert_allclose(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_allclose(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_allclose(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError(f"Invalid t return for nt={nt}")
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_allclose(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
atol=1.5e-4, rtol=0)
jnp = special.jnp_zeros(443,5)
assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1, 5)
assert_allclose(jnz, (array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),
atol=1.5e-5, rtol=0)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_allclose(jvprim, jv0, atol=1.5e-10, rtol=0)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_allclose(ozk,ozkr, atol=1.5e-8, rtol=0)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_allclose(ozke, ozker, atol=1.5e-8, rtol=0)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_allclose(o1k,o1kr, atol=1.5e-8, rtol=0)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_allclose(o1ke, o1ker, atol=1.5e-8, rtol=0)
def test_jacobi(self):
a = 5*np.random.random() - 1
b = 5*np.random.random() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_allclose(P0.c, [1], atol=1.5e-13, rtol=0)
assert_allclose(P1.c, array([a + b + 2, a - b]) / 2.0,
atol=1.5e-13, rtol=0)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_allclose(P2.c, array(p2c) / 8.0, atol=1.5e-13, rtol=0)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_allclose(P3.c, array(p3c) / 48.0, atol=1.5e-13, rtol=0)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_allclose(kn1, 1.7527038555281462, atol=1.5e-8, rtol=0)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_allclose(kv0, 1.7527038555281462, atol=1.5e-10, rtol=0)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_allclose(kv1, 4.775972543220472, atol=1.5e-10, rtol=0)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_allclose(kv2, 49.51242928773287, atol=1.5e-10, rtol=0)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_allclose(kve1, kv1, atol=1.5e-8, rtol=0)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_allclose(kve2, kv2, atol=1.5e-8, rtol=0)
def test_kvp_v0n1(self):
z = 2.2
assert_allclose(-special.kv(1, z), special.kvp(0, z, n=1),
atol=1.5e-10, rtol=0)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
# this function (kvp) is broken
assert_allclose(xc, x, atol=1.5e-10, rtol=0)
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_allclose(xc, x, atol=1.5e-10, rtol=0)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_allclose(oz, ozr, atol=1.5e-8, rtol=0)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_allclose(o1,o1r, atol=1.5e-8, rtol=0)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_allclose(abs(special.yv(0.0, all)), 0.0, atol=1.5e-11, rtol=0)
assert_allclose(abs(special.yv(1, all) - allval), 0.0, atol=1.5e-11, rtol=0)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_allclose(y1, (array([2.19714]), array([0.52079])),
atol=1.5e-5, rtol=0)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_allclose(y1p, (array([0.5768+0.904j]), array([-0.7635+0.5892j])),
atol=1.5e-3, rtol=0)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_allclose(an, array([5.64515, 9.36162]), atol=1.5e-5, rtol=0)
an = special.yn_zeros(443,5)
assert_allclose(an, [450.13573091578090314,
463.05692376675001542,
472.80651546418663566,
481.27353184725625838,
488.98055964441374646],
rtol=1e-15,)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_allclose(ao, array([2.19714133, 5.42968104]), atol=1.5e-6, rtol=0)
ao = special.ynp_zeros(43,5)
assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_allclose(yn2n, -3.3238249881118471, atol=1.5e-8, rtol=0)
def test_yn_gh_20405(self):
# Enforce correct asymptotic behavior for large n.
observed = cephes.yn(500, 1)
assert observed == -np.inf
def test_negv_yv(self):
assert_allclose(special.yv(-3, 2), -special.yv(3, 2),
atol=1.5e-14, rtol=0)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_allclose(yv2, -3.3238249881118471, atol=1.5e-8, rtol=0)
def test_negv_yve(self):
assert_allclose(special.yve(-3, 2), -special.yve(3, 2),
atol=1.5e-14, rtol=0)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_allclose(yve2, -3.3238249881118471, atol=1.5e-8, rtol=0)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_allclose(yve22, yve2r, atol=1.5e-8, rtol=0)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_allclose(yvp1, yvpr, atol=1.5e-10, rtol=0)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301]
z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300,
10003]
yield from itertools.product(v, z)
# check half-integers; these are problematic points at least
# for cephes/iv
yield from itertools.product(0.5 + arange(-60, 60), [3.5])
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_allclose(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
def skipper(v, z):
return abs(v) > 50
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305,
skip=skipper)
def test_iv_cephes_vs_amos(self):
with np.errstate(all='ignore'):
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
@pytest.mark.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(np.int64)
with np.errstate(all='ignore'):
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(
dc[k] < 2e-7,
(v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j))
)
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_allclose(special.jv(3, 4), 0.43017147387562193)
assert_allclose(special.jv(301, 1300), 0.0183487151115275)
assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_allclose(special.jv(-1, 1), -0.4400505857449335)
assert_allclose(special.jv(-2, 1), 0.1149034849319005)
assert_allclose(special.yv(-1, 1), 0.7812128213002887)
assert_allclose(special.yv(-2, 1), -1.650682606816255)
assert_allclose(special.iv(-1, 1), 0.5651591039924851)
assert_allclose(special.iv(-2, 1), 0.1357476697670383)
assert_allclose(special.kv(-1, 1), 0.6019072301972347)
assert_allclose(special.kv(-2, 1), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)
assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_allclose(
special.hankel1(-0.5, 1+1j),
special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j)
)
assert_allclose(
special.hankel2(-0.5, 1+1j),
special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j)
)
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_gh_7909(self):
assert_(special.kv(1.5, 0) == np.inf)
assert_(special.kve(1.5, 0) == np.inf)
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_allclose(special.iv(1, 700), 1.528500390233901e302)
assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(double)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(double).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_allclose(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_allclose(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_allclose(cv, v, atol=1.5e-8, rtol=0, err_msg=f'test #{i}')
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0, .1)
assert_allclose(oize, oizer, atol=1.5e-8, rtol=0)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_allclose(cv, v, atol=1.5e-8, rtol=0, err_msg=f'test #{i}')
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1, .1)
assert_allclose(oi1e, oi1er, atol=1.5e-8, rtol=0)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_allclose(iti0, array([31.848667776169801, 1.5673873907283657]),
atol=1.5e-5, rtol=0)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_allclose(it2k, array([0.0012503906973464409, 3.3309450354686687]),
atol=1.5e-6, rtol=0)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_allclose(iv1, 0.90710092578230106, atol=1.5e-10, rtol=0)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_allclose(ive1, iv1, atol=1.5e-10, rtol=0)
def test_ivp0(self):
assert_allclose(special.iv(1, 2), special.ivp(0, 2), atol=1.5e-10, rtol=0)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_allclose(x, y, atol=1.5e-10, rtol=0)
| TestBessel |
python | joerick__pyinstrument | pyinstrument/renderers/jsonrenderer.py | {
"start": 553,
"end": 3336
} | class ____(FrameRenderer):
"""
Outputs a tree of JSON, containing processed frames.
"""
output_file_extension = "json"
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
def render_frame(self, frame: Frame | None):
if frame is None:
return "null"
# we don't use the json module because it uses 2x stack frames, so
# crashes on deep but valid call stacks
property_decls: list[str] = []
property_decls.append('"function": %s' % encode_str(frame.function))
property_decls.append('"file_path_short": %s' % encode_str(frame.file_path_short or ""))
property_decls.append('"file_path": %s' % encode_str(frame.file_path or ""))
property_decls.append('"line_no": %d' % (frame.line_no or 0))
property_decls.append('"time": %f' % frame.time)
property_decls.append('"await_time": %f' % frame.await_time())
property_decls.append(
'"is_application_code": %s' % encode_bool(frame.is_application_code or False)
)
# can't use list comprehension here because it uses two stack frames each time.
children_jsons: list[str] = []
for child in frame.children:
children_jsons.append(self.render_frame(child))
property_decls.append('"children": [%s]' % ",".join(children_jsons))
if frame.group:
property_decls.append('"group_id": %s' % encode_str(frame.group.id))
if frame.class_name:
property_decls.append('"class_name": %s' % encode_str(frame.class_name))
return "{%s}" % ",".join(property_decls)
def render(self, session: Session):
frame = self.preprocess(session.root_frame())
property_decls: list[str] = []
property_decls.append('"start_time": %f' % session.start_time)
property_decls.append('"duration": %f' % session.duration)
property_decls.append('"sample_count": %d' % session.sample_count)
property_decls.append('"target_description": %s' % encode_str(session.target_description))
property_decls.append('"cpu_time": %f' % session.cpu_time)
property_decls.append('"root_frame": %s' % self.render_frame(frame))
return "{%s}\n" % ",".join(property_decls)
def default_processors(self) -> ProcessorList:
return [
processors.remove_importlib,
processors.remove_tracebackhide,
processors.merge_consecutive_self_time,
processors.aggregate_repeated_calls,
processors.remove_irrelevant_nodes,
processors.remove_unnecessary_self_time_nodes,
processors.remove_first_pyinstrument_frames_processor,
processors.group_library_frames_processor,
]
| JSONRenderer |
python | pytest-dev__pytest | testing/test_pytester.py | {
"start": 11957,
"end": 28047
} | class ____:
other_path = {"path": "meta_path", "meta_path": "path"}
@staticmethod
def path(n: int) -> str:
return "my-dirty-little-secret-" + str(n)
def test_restore(self, monkeypatch: MonkeyPatch, path_type) -> None:
other_path_type = self.other_path[path_type]
for i in range(10):
assert self.path(i) not in getattr(sys, path_type)
sys_path = [self.path(i) for i in range(6)]
monkeypatch.setattr(sys, path_type, sys_path)
original = list(sys_path)
original_other = list(getattr(sys, other_path_type))
snapshot = SysPathsSnapshot()
transformation = {"source": (0, 1, 2, 3, 4, 5), "target": (6, 2, 9, 7, 5, 8)}
assert sys_path == [self.path(x) for x in transformation["source"]]
sys_path[1] = self.path(6)
sys_path[3] = self.path(7)
sys_path.append(self.path(8))
del sys_path[4]
sys_path[3:3] = [self.path(9)]
del sys_path[0]
assert sys_path == [self.path(x) for x in transformation["target"]]
snapshot.restore()
assert getattr(sys, path_type) is sys_path
assert getattr(sys, path_type) == original
assert getattr(sys, other_path_type) == original_other
def test_preserve_container(self, monkeypatch: MonkeyPatch, path_type) -> None:
other_path_type = self.other_path[path_type]
original_data = list(getattr(sys, path_type))
original_other = getattr(sys, other_path_type)
original_other_data = list(original_other)
new: list[object] = []
snapshot = SysPathsSnapshot()
monkeypatch.setattr(sys, path_type, new)
snapshot.restore()
assert getattr(sys, path_type) is new
assert getattr(sys, path_type) == original_data
assert getattr(sys, other_path_type) is original_other
assert getattr(sys, other_path_type) == original_other_data
def test_pytester_subprocess(pytester: Pytester) -> None:
testfile = pytester.makepyfile("def test_one(): pass")
assert pytester.runpytest_subprocess(testfile).ret == 0
def test_pytester_subprocess_via_runpytest_arg(pytester: Pytester) -> None:
testfile = pytester.makepyfile(
"""
def test_pytester_subprocess(pytester):
import os
testfile = pytester.makepyfile(
\"""
import os
def test_one():
assert {} != os.getpid()
\""".format(os.getpid())
)
assert pytester.runpytest(testfile).ret == 0
"""
)
result = pytester.runpytest_inprocess(
"-p", "pytester", "--runpytest", "subprocess", testfile
)
assert result.ret == 0
def test_unicode_args(pytester: Pytester) -> None:
result = pytester.runpytest("-k", "אבג")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_pytester_run_no_timeout(pytester: Pytester) -> None:
testfile = pytester.makepyfile("def test_no_timeout(): pass")
assert pytester.runpytest_subprocess(testfile).ret == ExitCode.OK
def test_pytester_run_with_timeout(pytester: Pytester) -> None:
testfile = pytester.makepyfile("def test_no_timeout(): pass")
timeout = 120
instant = _pytest.timing.Instant()
result = pytester.runpytest_subprocess(testfile, timeout=timeout)
duration = instant.elapsed()
assert result.ret == ExitCode.OK
assert duration.seconds < timeout
def test_pytester_run_timeout_expires(pytester: Pytester) -> None:
testfile = pytester.makepyfile(
"""
import time
def test_timeout():
time.sleep(10)"""
)
with pytest.raises(pytester.TimeoutExpired):
pytester.runpytest_subprocess(testfile, timeout=1)
def test_linematcher_with_nonlist() -> None:
"""Test LineMatcher with regard to passing in a set (accidentally)."""
from _pytest._code.source import Source
lm = LineMatcher([])
with pytest.raises(TypeError, match="invalid type for lines2: set"):
lm.fnmatch_lines(set()) # type: ignore[arg-type]
with pytest.raises(TypeError, match="invalid type for lines2: dict"):
lm.fnmatch_lines({}) # type: ignore[arg-type]
with pytest.raises(TypeError, match="invalid type for lines2: set"):
lm.re_match_lines(set()) # type: ignore[arg-type]
with pytest.raises(TypeError, match="invalid type for lines2: dict"):
lm.re_match_lines({}) # type: ignore[arg-type]
with pytest.raises(TypeError, match="invalid type for lines2: Source"):
lm.fnmatch_lines(Source()) # type: ignore[arg-type]
lm.fnmatch_lines([])
lm.fnmatch_lines(())
lm.fnmatch_lines("")
assert lm._getlines({}) == {} # type: ignore[arg-type,comparison-overlap]
assert lm._getlines(set()) == set() # type: ignore[arg-type,comparison-overlap]
assert lm._getlines(Source()) == []
assert lm._getlines(Source("pass\npass")) == ["pass", "pass"]
def test_linematcher_match_failure() -> None:
lm = LineMatcher(["foo", "foo", "bar"])
with pytest.raises(pytest.fail.Exception) as e:
lm.fnmatch_lines(["foo", "f*", "baz"])
assert e.value.msg is not None
assert e.value.msg.splitlines() == [
"exact match: 'foo'",
"fnmatch: 'f*'",
" with: 'foo'",
"nomatch: 'baz'",
" and: 'bar'",
"remains unmatched: 'baz'",
]
lm = LineMatcher(["foo", "foo", "bar"])
with pytest.raises(pytest.fail.Exception) as e:
lm.re_match_lines(["foo", "^f.*", "baz"])
assert e.value.msg is not None
assert e.value.msg.splitlines() == [
"exact match: 'foo'",
"re.match: '^f.*'",
" with: 'foo'",
" nomatch: 'baz'",
" and: 'bar'",
"remains unmatched: 'baz'",
]
def test_linematcher_consecutive() -> None:
lm = LineMatcher(["1", "", "2"])
with pytest.raises(pytest.fail.Exception) as excinfo:
lm.fnmatch_lines(["1", "2"], consecutive=True)
assert str(excinfo.value).splitlines() == [
"exact match: '1'",
"no consecutive match: '2'",
" with: ''",
]
lm.re_match_lines(["1", r"\d?", "2"], consecutive=True)
with pytest.raises(pytest.fail.Exception) as excinfo:
lm.re_match_lines(["1", r"\d", "2"], consecutive=True)
assert str(excinfo.value).splitlines() == [
"exact match: '1'",
r"no consecutive match: '\\d'",
" with: ''",
]
@pytest.mark.parametrize("function", ["no_fnmatch_line", "no_re_match_line"])
def test_linematcher_no_matching(function: str) -> None:
if function == "no_fnmatch_line":
good_pattern = "*.py OK*"
bad_pattern = "*X.py OK*"
else:
assert function == "no_re_match_line"
good_pattern = r".*py OK"
bad_pattern = r".*Xpy OK"
lm = LineMatcher(
[
"cachedir: .pytest_cache",
"collecting ... collected 1 item",
"",
"show_fixtures_per_test.py OK",
"=== elapsed 1s ===",
]
)
# check the function twice to ensure we don't accumulate the internal buffer
for i in range(2):
with pytest.raises(pytest.fail.Exception) as e:
func = getattr(lm, function)
func(good_pattern)
obtained = str(e.value).splitlines()
if function == "no_fnmatch_line":
assert obtained == [
f"nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
f"fnmatch: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
else:
assert obtained == [
f" nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
f"re.match: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
func = getattr(lm, function)
func(bad_pattern) # bad pattern does not match any line: passes
def test_linematcher_no_matching_after_match() -> None:
lm = LineMatcher(["1", "2", "3"])
lm.fnmatch_lines(["1", "3"])
with pytest.raises(pytest.fail.Exception) as e:
lm.no_fnmatch_line("*")
assert str(e.value).splitlines() == ["fnmatch: '*'", " with: '1'"]
def test_linematcher_string_api() -> None:
lm = LineMatcher(["foo", "bar"])
assert str(lm) == "foo\nbar"
def test_pytest_addopts_before_pytester(request, monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("PYTEST_ADDOPTS", "--orig-unused")
_: Pytester = request.getfixturevalue("pytester")
assert "PYTEST_ADDOPTS" not in os.environ
def test_run_stdin(pytester: Pytester) -> None:
with pytest.raises(pytester.TimeoutExpired):
pytester.run(
sys.executable,
"-c",
"import sys, time; time.sleep(1); print(sys.stdin.read())",
stdin=subprocess.PIPE,
timeout=0.1,
)
with pytest.raises(pytester.TimeoutExpired):
result = pytester.run(
sys.executable,
"-c",
"import sys, time; time.sleep(1); print(sys.stdin.read())",
stdin=b"input\n2ndline",
timeout=0.1,
)
result = pytester.run(
sys.executable,
"-c",
"import sys; print(sys.stdin.read())",
stdin=b"input\n2ndline",
)
assert result.stdout.lines == ["input", "2ndline"]
assert result.stderr.str() == ""
assert result.ret == 0
def test_popen_stdin_pipe(pytester: Pytester) -> None:
proc = pytester.popen(
[sys.executable, "-c", "import sys; print(sys.stdin.read())"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdin = b"input\n2ndline"
stdout, stderr = proc.communicate(input=stdin)
assert stdout.decode("utf8").splitlines() == ["input", "2ndline"]
assert stderr == b""
assert proc.returncode == 0
def test_popen_stdin_bytes(pytester: Pytester) -> None:
proc = pytester.popen(
[sys.executable, "-c", "import sys; print(sys.stdin.read())"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=b"input\n2ndline",
)
stdout, stderr = proc.communicate()
assert stdout.decode("utf8").splitlines() == ["input", "2ndline"]
assert stderr == b""
assert proc.returncode == 0
def test_popen_default_stdin_stderr_and_stdin_None(pytester: Pytester) -> None:
# stdout, stderr default to pipes,
# stdin can be None to not close the pipe, avoiding
# "ValueError: flush of closed file" with `communicate()`.
#
# Wraps the test to make it not hang when run with "-s".
p1 = pytester.makepyfile(
'''
import sys
def test_inner(pytester):
p1 = pytester.makepyfile(
"""
import sys
print(sys.stdin.read()) # empty
print('stdout')
sys.stderr.write('stderr')
"""
)
proc = pytester.popen([sys.executable, str(p1)], stdin=None)
stdout, stderr = proc.communicate(b"ignored")
assert stdout.splitlines() == [b"", b"stdout"]
assert stderr.splitlines() == [b"stderr"]
assert proc.returncode == 0
'''
)
result = pytester.runpytest("-p", "pytester", str(p1))
assert result.ret == 0
def test_spawn_uses_tmphome(pytester: Pytester) -> None:
tmphome = str(pytester.path)
assert os.environ.get("HOME") == tmphome
pytester._monkeypatch.setenv("CUSTOMENV", "42")
p1 = pytester.makepyfile(
f"""
import os
def test():
assert os.environ["HOME"] == {tmphome!r}
assert os.environ["CUSTOMENV"] == "42"
"""
)
child = pytester.spawn_pytest(str(p1))
out = child.read()
assert child.wait() == 0, out.decode("utf8")
def test_run_result_repr() -> None:
outlines = ["some", "normal", "output"]
errlines = ["some", "nasty", "errors", "happened"]
# known exit code
r = pytester_mod.RunResult(1, outlines, errlines, duration=0.5)
assert repr(r) == (
f"<RunResult ret={pytest.ExitCode.TESTS_FAILED!s} len(stdout.lines)=3"
" len(stderr.lines)=4 duration=0.50s>"
)
# unknown exit code: just the number
r = pytester_mod.RunResult(99, outlines, errlines, duration=0.5)
assert (
repr(r) == "<RunResult ret=99 len(stdout.lines)=3"
" len(stderr.lines)=4 duration=0.50s>"
)
def test_pytester_outcomes_with_multiple_errors(pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
@pytest.fixture
def bad_fixture():
raise Exception("bad")
def test_error1(bad_fixture):
pass
def test_error2(bad_fixture):
pass
"""
)
result = pytester.runpytest(str(p1))
result.assert_outcomes(errors=2)
assert result.parseoutcomes() == {"errors": 2}
def test_parse_summary_line_always_plural() -> None:
"""Parsing summaries always returns plural nouns (#6505)"""
lines = [
"some output 1",
"some output 2",
"======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====",
"done.",
]
assert pytester_mod.RunResult.parse_summary_nouns(lines) == {
"errors": 1,
"failed": 1,
"passed": 1,
"warnings": 1,
}
lines = [
"some output 1",
"some output 2",
"======= 1 failed, 1 passed, 2 warnings, 2 errors in 0.13s ====",
"done.",
]
assert pytester_mod.RunResult.parse_summary_nouns(lines) == {
"errors": 2,
"failed": 1,
"passed": 1,
"warnings": 2,
}
def test_makefile_joins_absolute_path(pytester: Pytester) -> None:
absfile = pytester.path / "absfile"
p1 = pytester.makepyfile(**{str(absfile): ""})
assert str(p1) == str(pytester.path / "absfile.py")
def test_pytester_makefile_dot_prefixes_extension_with_warning(
pytester: Pytester,
) -> None:
with pytest.raises(
ValueError,
match=r"pytester\.makefile expects a file extension, try \.foo\.bar instead of foo\.bar",
):
pytester.makefile("foo.bar", "")
@pytest.mark.filterwarnings("default")
def test_pytester_assert_outcomes_warnings(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import warnings
def test_with_warning():
warnings.warn(UserWarning("some custom warning"))
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1, warnings=1)
# If warnings is not passed, it is not checked at all.
result.assert_outcomes(passed=1)
def test_pytester_outcomes_deselected(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_one():
pass
def test_two():
pass
"""
)
result = pytester.runpytest("-k", "test_one")
result.assert_outcomes(passed=1, deselected=1)
# If deselected is not passed, it is not checked at all.
result.assert_outcomes(passed=1)
def test_pytester_subprocess_with_string_plugins(pytester: Pytester) -> None:
"""Test that pytester.runpytest_subprocess is OK with named (string)
`.plugins`."""
pytester.plugins = ["pytester"]
result = pytester.runpytest_subprocess()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_pytester_subprocess_with_non_string_plugins(pytester: Pytester) -> None:
"""Test that pytester.runpytest_subprocess fails with a proper error given
non-string `.plugins`."""
class MyPlugin:
pass
pytester.plugins = [MyPlugin()]
with pytest.raises(ValueError, match="plugins as objects is not supported"):
pytester.runpytest_subprocess()
| TestSysPathsSnapshot |
python | getsentry__sentry | src/sentry/api/endpoints/project_artifact_bundle_files.py | {
"start": 813,
"end": 1175
} | class ____:
def __init__(self, file_path: str, info: dict[str, str]):
self.file_path = file_path
self.info = info
def __eq__(self, other):
return self.file_path == other.file_path
def __hash__(self):
return hash(self.file_path)
def __lt__(self, other):
return self.file_path < other.file_path
| ArtifactFile |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 8473,
"end": 9015
} | class ____(LTTextContainer):
def __init__(self, word_margin):
LTTextContainer.__init__(self)
self.word_margin = word_margin
return
def __repr__(self):
return ('<%s %s %r>' %
(self.__class__.__name__, bbox2str(self.bbox),
self.get_text()))
def analyze(self, laparams):
LTTextContainer.analyze(self, laparams)
LTContainer.add(self, LTAnno('\n'))
return
def find_neighbors(self, plane, ratio):
raise NotImplementedError
| LTTextLine |
python | PyCQA__pylint | tests/functional/n/non/non_iterator_returned.py | {
"start": 337,
"end": 627
} | class ____:
""" __iter__ and next """
def __iter__(self):
return self
def __next__(self):
""" Infinite iterator, but still an iterator """
return 1
def next(self):
"""Same as __next__, but for Python 2."""
return 1
| SecondGoodIterator |
python | tensorflow__tensorflow | tensorflow/security/fuzzing/python_fuzzing.py | {
"start": 1434,
"end": 7142
} | class ____(object):
"""FuzzingHelper makes handling FuzzedDataProvider easier with TensorFlow Python fuzzing."""
def __init__(self, input_bytes):
"""FuzzingHelper initializer.
Args:
input_bytes: Input randomized bytes used to create a FuzzedDataProvider.
"""
self.fdp = atheris.FuzzedDataProvider(input_bytes)
def get_bool(self):
"""Consume a bool.
Returns:
Consumed a bool based on input bytes and constraints.
"""
return self.fdp.ConsumeBool()
def get_int(self, min_int=_MIN_INT, max_int=_MAX_INT):
"""Consume a signed integer with given constraints.
Args:
min_int: Minimum allowed integer.
max_int: Maximum allowed integer.
Returns:
Consumed integer based on input bytes and constraints.
"""
return self.fdp.ConsumeIntInRange(min_int, max_int)
def get_float(self, min_float=_MIN_FLOAT, max_float=_MAX_FLOAT):
"""Consume a float with given constraints.
Args:
min_float: Minimum allowed float.
max_float: Maximum allowed float.
Returns:
Consumed float based on input bytes and constraints.
"""
return self.fdp.ConsumeFloatInRange(min_float, max_float)
def get_int_list(self,
min_length=_MIN_LENGTH,
max_length=_MAX_LENGTH,
min_int=_MIN_INT,
max_int=_MAX_INT):
"""Consume a signed integer list with given constraints.
Args:
min_length: The minimum length of the list.
max_length: The maximum length of the list.
min_int: Minimum allowed integer.
max_int: Maximum allowed integer.
Returns:
Consumed integer list based on input bytes and constraints.
"""
length = self.get_int(min_length, max_length)
return self.fdp.ConsumeIntListInRange(length, min_int, max_int)
def get_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):
"""Consume a float list with given constraints.
Args:
min_length: The minimum length of the list.
max_length: The maximum length of the list.
Returns:
Consumed integer list based on input bytes and constraints.
"""
length = self.get_int(min_length, max_length)
return self.fdp.ConsumeFloatListInRange(length, _MIN_FLOAT, _MAX_FLOAT)
def get_int_or_float_list(self,
min_length=_MIN_LENGTH,
max_length=_MAX_LENGTH):
"""Consume a signed integer or float list with given constraints based on a consumed bool.
Args:
min_length: The minimum length of the list.
max_length: The maximum length of the list.
Returns:
Consumed integer or float list based on input bytes and constraints.
"""
if self.get_bool():
return self.get_int_list(min_length, max_length)
else:
return self.get_float_list(min_length, max_length)
def get_tf_dtype(self, allowed_set=None):
"""Return a random tensorflow dtype.
Args:
allowed_set: An allowlisted set of dtypes to choose from instead of all of
them.
Returns:
A random type from the list containing all TensorFlow types.
"""
if allowed_set:
index = self.get_int(0, len(allowed_set) - 1)
if allowed_set[index] not in _TF_DTYPES:
raise tf.errors.InvalidArgumentError(
None, None,
'Given dtype {} is not accepted.'.format(allowed_set[index]))
return allowed_set[index]
else:
index = self.get_int(0, len(_TF_DTYPES) - 1)
return _TF_DTYPES[index]
def get_string(self, byte_count=_MAX_INT):
"""Consume a string with given constraints based on a consumed bool.
Args:
byte_count: Byte count that defaults to _MAX_INT.
Returns:
Consumed string based on input bytes and constraints.
"""
return self.fdp.ConsumeString(byte_count)
def get_random_numeric_tensor(self,
dtype=None,
min_size=_MIN_SIZE,
max_size=_MAX_SIZE,
min_val=_MIN_INT,
max_val=_MAX_INT):
"""Return a tensor of random shape and values.
Generated tensors are capped at dimension sizes of 8, as 2^32 bytes of
requested memory crashes the fuzzer (see b/34190148).
Returns only type that tf.random.uniform can generate. If you need a
different type, consider using tf.cast.
Args:
dtype: Type of tensor, must of one of the following types: float16,
float32, float64, int32, or int64
min_size: Minimum size of returned tensor
max_size: Maximum size of returned tensor
min_val: Minimum value in returned tensor
max_val: Maximum value in returned tensor
Returns:
Tensor of random shape filled with uniformly random numeric values.
"""
# Max shape can be 8 in length and randomized from 0-8 without running into
# an OOM error.
if max_size > 8:
raise tf.errors.InvalidArgumentError(
None, None,
'Given size of {} will result in an OOM error'.format(max_size))
seed = self.get_int()
shape = self.get_int_list(
min_length=min_size,
max_length=max_size,
min_int=min_size,
max_int=max_size)
if dtype is None:
dtype = self.get_tf_dtype(allowed_set=_TF_RANDOM_DTYPES)
elif dtype not in _TF_RANDOM_DTYPES:
raise tf.errors.InvalidArgumentError(
None, None,
'Given dtype {} is not accepted in get_random_numeric_tensor'.format(
dtype))
return tf.random.uniform(
shape=shape, minval=min_val, maxval=max_val, dtype=dtype, seed=seed)
| FuzzingHelper |
python | Pylons__pyramid | tests/test_testing.py | {
"start": 22716,
"end": 22804
} | class ____:
def __init__(self, kw):
self.__dict__.update(kw)
| DummyRendererInfo |
python | google__pytype | pytype/tests/test_annotations.py | {
"start": 36253,
"end": 37948
} | class ____(test_base.BaseTest):
"""Tests for stringified annotations."""
def test_postponed_evaluation(self):
self.Check("""
from __future__ import annotations
def f() -> int:
return 0
""")
def test_postponed_evaluation_error(self):
self.CheckWithErrors("""
from __future__ import annotations
def f() -> str:
return 0 # bad-return-type
""")
def test_forward_reference(self):
self.Check("""
from __future__ import annotations
from typing import Optional
class A:
b: Optional[B] = None
class B:
pass
assert_type(A().b, Optional[B])
""")
def test_explicit_forward_reference(self):
# Check that explicit string annotations still work.
self.Check("""
from __future__ import annotations
from typing import Optional
class A:
b: Optional['B'] = None
c: "Optional['B']" = None
class B:
pass
assert_type(A().b, Optional[B])
assert_type(A().c, Optional[B])
""")
def test_generic_forward_reference_to_collection(self):
# Makes sure that we don't get an error when set[int] is converted to
# typing.Set[int] (since typing.Set is not imported).
ty = self.Infer("""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]):
def f(self) -> A[set[int]]:
return self
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, Set, TypeVar
T = TypeVar('T')
class A(Generic[T]):
def f(self) -> A[Set[int]]: ...
""",
)
| TestStringifiedAnnotations |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 43278,
"end": 53385
} | class ____(_fixtures.FixtureTest):
run_inserts = None
def test_one_to_many_1(self):
"""Basic save of one to many."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
),
)
u = User(name="one2manytester")
a = Address(email_address="one2many@test.org")
u.addresses.append(a)
a2 = Address(email_address="lala@test.org")
u.addresses.append(a2)
session = fixture_session()
session.add(u)
session.flush()
conn = session.connection()
user_rows = conn.execute(
users.select().where(users.c.id.in_([u.id]))
).fetchall()
eq_(list(user_rows[0]), [u.id, "one2manytester"])
address_rows = conn.execute(
addresses.select()
.order_by(addresses.c.email_address)
.where(
addresses.c.id.in_([a.id, a2.id]),
)
).fetchall()
eq_(list(address_rows[0]), [a2.id, u.id, "lala@test.org"])
eq_(list(address_rows[1]), [a.id, u.id, "one2many@test.org"])
userid = u.id
addressid = a2.id
a2.email_address = "somethingnew@foo.com"
session.flush()
address_rows = conn.execute(
addresses.select().where(addresses.c.id == addressid)
).fetchall()
eq_(list(address_rows[0]), [addressid, userid, "somethingnew@foo.com"])
self.assert_(u.id == userid and a2.id == addressid)
def test_one_to_many_2(self):
"""Modifying the child items of an object."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
),
)
u1 = User(name="user1")
u1.addresses = []
a1 = Address(email_address="emailaddress1")
u1.addresses.append(a1)
u2 = User(name="user2")
u2.addresses = []
a2 = Address(email_address="emailaddress2")
u2.addresses.append(a2)
a3 = Address(email_address="emailaddress3")
session = fixture_session()
session.add_all((u1, u2, a3))
session.flush()
# modify user2 directly, append an address to user1.
# upon commit, user2 should be updated, user1 should not
# both address1 and address3 should be updated
u2.name = "user2modified"
u1.addresses.append(a3)
del u1.addresses[0]
self.assert_sql(
testing.db,
session.flush,
[
(
"UPDATE users SET name=:name "
"WHERE users.id = :users_id",
{"users_id": u2.id, "name": "user2modified"},
),
(
"UPDATE addresses SET user_id=:user_id "
"WHERE addresses.id = :addresses_id",
[
{"user_id": None, "addresses_id": a1.id},
{"user_id": u1.id, "addresses_id": a3.id},
],
),
],
)
def test_child_move(self):
"""Moving a child from one parent to another, with a delete.
Tests that deleting the first parent properly updates the child with
the new parent. This tests the 'trackparent' option in the attributes
module.
"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
),
)
u1 = User(name="user1")
u2 = User(name="user2")
a = Address(email_address="address1")
u1.addresses.append(a)
session = fixture_session()
session.add_all((u1, u2))
session.flush()
del u1.addresses[0]
u2.addresses.append(a)
session.delete(u1)
session.flush()
session.expunge_all()
u2 = session.get(User, u2.id)
eq_(len(u2.addresses), 1)
def test_child_move_2(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
),
)
u1 = User(name="user1")
u2 = User(name="user2")
a = Address(email_address="address1")
u1.addresses.append(a)
session = fixture_session()
session.add_all((u1, u2))
session.flush()
del u1.addresses[0]
u2.addresses.append(a)
session.flush()
session.expunge_all()
u2 = session.get(User, u2.id)
eq_(len(u2.addresses), 1)
def test_o2m_delete_parent(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
address=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
uselist=False,
)
),
)
u = User(name="one2onetester")
a = Address(email_address="myonlyaddress@foo.com")
u.address = a
session = fixture_session()
session.add(u)
session.flush()
session.delete(u)
session.flush()
assert a.id is not None
assert a.user_id is None
assert sa.orm.attributes.instance_state(a).key in session.identity_map
assert (
sa.orm.attributes.instance_state(u).key not in session.identity_map
)
def test_one_to_one(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
address=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
uselist=False,
)
),
)
u = User(name="one2onetester")
u.address = Address(email_address="myonlyaddress@foo.com")
session = fixture_session()
session.add(u)
session.flush()
u.name = "imnew"
session.flush()
u.address.email_address = "imnew@foo.com"
session.flush()
def test_bidirectional(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
m1 = self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(m1, lazy="joined", backref="addresses")
),
)
u = User(name="test")
Address(email_address="testaddress", user=u)
session = fixture_session()
session.add(u)
session.flush()
session.delete(u)
session.flush()
def test_double_relationship(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
m2 = self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"boston_addresses": relationship(
m2,
primaryjoin=sa.and_(
users.c.id == addresses.c.user_id,
addresses.c.email_address.like("%boston%"),
),
overlaps="newyork_addresses",
),
"newyork_addresses": relationship(
m2,
primaryjoin=sa.and_(
users.c.id == addresses.c.user_id,
addresses.c.email_address.like("%newyork%"),
),
overlaps="boston_addresses",
),
},
)
u = User(name="u1")
a = Address(email_address="foo@boston.com")
b = Address(email_address="bar@newyork.com")
u.boston_addresses.append(a)
u.newyork_addresses.append(b)
session = fixture_session()
session.add(u)
session.flush()
| OneToManyTest |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 60613,
"end": 64144
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: BigBirdPegasusConfig, seed=None):
super().__init__()
self.attention_type = config.attention_type
self.embed_dim = config.d_model
self.self_attn = BigBirdPegasusEncoderAttention(config, seed=seed)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
self_attention_outputs = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
band_mask=band_mask,
from_mask=from_mask,
to_mask=to_mask,
from_blocked_mask=from_blocked_mask,
to_blocked_mask=to_blocked_mask,
)
hidden_states = self_attention_outputs[0]
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attention_outputs[1],)
return outputs
def set_attention_type(self, value: str):
if value not in ["original_full", "block_sparse"]:
raise ValueError(
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
)
# attention type is already correctly set
if value == self.attention_type:
return
self.attention_type = value
self.self_attn.set_attention_type(value)
| BigBirdPegasusEncoderLayer |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 142256,
"end": 145893
} | class ____(ASTBase):
def __init__(
self, params: list[ASTTemplateParam], requiresClause: ASTRequiresClause | None
) -> None:
assert params is not None
self.params = params
self.requiresClause = requiresClause
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateParams):
return NotImplemented
return (
self.params == other.params and self.requiresClause == other.requiresClause
)
def __hash__(self) -> int:
return hash((self.params, self.requiresClause))
def get_id(self, version: int, excludeRequires: bool = False) -> str:
assert version >= 2
res = ['I']
res.extend(param.get_id(version) for param in self.params)
res.append('E')
if not excludeRequires and self.requiresClause:
res.extend(['IQ', self.requiresClause.expr.get_id(version), 'E'])
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
res.extend((
'template<',
', '.join(transform(a) for a in self.params),
'> ',
))
if self.requiresClause is not None:
res.extend((transform(self.requiresClause), ' '))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('template', 'template')
signode += addnodes.desc_sig_punctuation('<', '<')
first = True
for param in self.params:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
first = False
param.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation('>', '>')
if self.requiresClause is not None:
signode += addnodes.desc_sig_space()
self.requiresClause.describe_signature(signode, mode, env, symbol)
def describe_signature_as_introducer(
self,
parentNode: desc_signature,
mode: str,
env: BuildEnvironment,
symbol: Symbol,
lineSpec: bool,
) -> None:
def make_line(parent_node: desc_signature) -> addnodes.desc_signature_line:
signode = addnodes.desc_signature_line()
parent_node += signode
signode.sphinx_line_type = 'templateParams'
return signode
line_node = make_line(parentNode)
line_node += addnodes.desc_sig_keyword('template', 'template')
line_node += addnodes.desc_sig_punctuation('<', '<')
first = True
for param in self.params:
if not first:
line_node += addnodes.desc_sig_punctuation(',', ',')
line_node += addnodes.desc_sig_space()
first = False
if lineSpec:
line_node = make_line(parentNode)
param.describe_signature(line_node, mode, env, symbol)
if lineSpec and not first:
line_node = make_line(parentNode)
line_node += addnodes.desc_sig_punctuation('>', '>')
if self.requiresClause:
req_node = addnodes.desc_signature_line()
req_node.sphinx_line_type = 'requiresClause'
parentNode += req_node
self.requiresClause.describe_signature(req_node, 'markType', env, symbol)
# Template introducers
################################################################################
| ASTTemplateParams |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/setup.py | {
"start": 972,
"end": 2743
} | class ____(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['graphql', '-vrsx']
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='graphql-core',
version=version,
description='GraphQL implementation for Python',
url='https://github.com/graphql-python/graphql-core',
download_url='https://github.com/graphql-python/graphql-core/releases',
author='Syrus Akbary, Jake Heinz, Taeho Kim',
author_email='Syrus Akbary <me@syrusakbary.com>, Jake Heinz <me@jh.gg>, Taeho Kim <dittos@gmail.com>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
'License :: OSI Approved :: MIT License',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
],
keywords='api graphql protocol rest',
packages=find_packages(exclude=['tests', 'tests_py35']),
install_requires=install_requires,
tests_require=tests_requires,
cmdclass = {'test': PyTest},
extras_require={
'gevent': [
'gevent==1.1rc1'
],
'test': tests_requires
}
)
| PyTest |
python | pennersr__django-allauth | tests/apps/account/test_signup.py | {
"start": 5117,
"end": 16769
} | class ____(TestCase):
def test_signup_same_email_verified_externally(self):
user = self._test_signup_email_verified_externally(
"john@example.com", "john@example.com"
)
self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1)
EmailAddress.objects.get(
verified=True, email="john@example.com", user=user, primary=True
)
def test_signup_other_email_verified_externally(self):
"""
John is invited on john@example.org, but signs up via john@example.com.
Email verification is by-passed, their home email address is
used as a secondary.
"""
user = self._test_signup_email_verified_externally(
"john@example.com", "john@example.org"
)
self.assertEqual(EmailAddress.objects.filter(user=user).count(), 2)
EmailAddress.objects.get(
verified=False, email="john@example.com", user=user, primary=False
)
EmailAddress.objects.get(
verified=True, email="john@example.org", user=user, primary=True
)
def _test_signup_email_verified_externally(self, signup_email, verified_email):
username = "johndoe"
request = RequestFactory().post(
reverse("account_signup"),
{
"username": username,
"email": signup_email,
"password1": "johndoe",
"password2": "johndoe",
},
)
# Fake stash_verified_email
SessionMiddleware(lambda request: None).process_request(request)
MessageMiddleware(lambda request: None).process_request(request)
request.user = AnonymousUser()
request.session["account_verified_email"] = verified_email
from allauth.account.views import signup
with context.request_context(request):
resp = signup(request)
self.assertEqual(resp.status_code, HTTPStatus.FOUND)
self.assertEqual(
resp["location"], get_adapter().get_signup_redirect_url(request)
)
self.assertEqual(len(mail.outbox), 0)
return get_user_model().objects.get(username=username)
@override_settings(
ACCOUNT_USERNAME_REQUIRED=True,
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE=True,
)
def test_signup_password_twice_form_error(self):
resp = self.client.post(
reverse("account_signup"),
data={
"username": "johndoe",
"email": "john@example.org",
"password1": "johndoe",
"password2": "janedoe",
},
)
self.assertFormError(
resp.context["form"],
"password2",
"You must type the same password each time.",
)
@override_settings(
ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True
)
def test_signup_email_twice(self):
request = RequestFactory().post(
reverse("account_signup"),
{
"username": "johndoe",
"email": "john@example.org",
"email2": "john@example.org",
"password1": "johndoe",
"password2": "johndoe",
},
)
SessionMiddleware(lambda request: None).process_request(request)
MessageMiddleware(lambda request: None).process_request(request)
request.user = AnonymousUser()
from allauth.account.views import signup
with context.request_context(request):
signup(request)
user = get_user_model().objects.get(username="johndoe")
self.assertEqual(user.email, "john@example.org")
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": 9,
},
}
]
)
def test_django_password_validation(self):
resp = self.client.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": "john@example.com",
"password1": "johndoe",
"password2": "johndoe",
},
)
self.assertFormError(resp.context["form"], None, [])
self.assertFormError(
resp.context["form"],
"password1",
["This password is too short. It must contain at least 9 characters."],
)
def test_prevent_enumeration_with_mandatory_verification(
settings, user_factory, email_factory
):
settings.ACCOUNT_PREVENT_ENUMERATION = True
settings.ACCOUNT_LOGIN_METHODS = {app_settings.LoginMethod.EMAIL}
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.MANDATORY
user = user_factory(username="john", email="john@example.org", password="doe")
c = Client()
resp = c.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": email_factory(email=user.email, mixed_case=True),
"password1": "johndoe",
"password2": "johndoe",
},
)
assert resp.status_code == HTTPStatus.FOUND
assert resp["location"] == reverse("account_email_verification_sent")
assertTemplateUsed(resp, "account/email/account_already_exists_message.txt")
assertTemplateUsed(resp, "account/messages/email_confirmation_sent.txt")
assert EmailAddress.objects.filter(email="john@example.org").count() == 1
def test_prevent_enumeration_off(settings, user_factory, email_factory):
settings.ACCOUNT_PREVENT_ENUMERATION = False
settings.ACCOUNT_LOGIN_METHODS = {app_settings.LoginMethod.EMAIL}
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.MANDATORY
user = user_factory(username="john", email="john@example.org", password="doe")
c = Client()
resp = c.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": email_factory(email=user.email, mixed_case=True),
"password1": "johndoe",
"password2": "johndoe",
},
)
assert resp.status_code == HTTPStatus.OK
assert resp.context["form"].errors == {
"email": ["A user is already registered with this email address."]
}
def test_prevent_enumeration_strictly(settings, user_factory, email_factory):
settings.ACCOUNT_PREVENT_ENUMERATION = "strict"
settings.ACCOUNT_LOGIN_METHODS = {app_settings.LoginMethod.EMAIL}
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.NONE
user = user_factory(username="john", email="john@example.org", password="doe")
c = Client()
resp = c.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": email_factory(email=user.email, mixed_case=True),
"password1": "johndoe",
"password2": "johndoe",
},
)
assert resp.status_code == HTTPStatus.FOUND
assert resp["location"] == settings.LOGIN_REDIRECT_URL
assert EmailAddress.objects.filter(email="john@example.org").count() == 2
def test_prevent_enumeration_on(settings, user_factory, email_factory):
settings.ACCOUNT_PREVENT_ENUMERATION = True
settings.ACCOUNT_LOGIN_METHODS = {app_settings.LoginMethod.EMAIL}
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.NONE
user = user_factory(username="john", email="john@example.org", password="doe")
c = Client()
resp = c.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": email_factory(email=user.email, mixed_case=True),
"password1": "johndoe",
"password2": "johndoe",
},
)
assert resp.status_code == HTTPStatus.OK
assert resp.context["form"].errors == {
"email": ["A user is already registered with this email address."]
}
@pytest.mark.django_db
def test_get_initial_with_valid_email():
"""Test that the email field is populated with a valid email."""
request = RequestFactory().get("/signup/?email=test@example.com")
from allauth.account.views import signup
SessionMiddleware(lambda request: None).process_request(request)
request.user = AnonymousUser()
with context.request_context(request):
view = signup(request)
assert view.context_data["view"].get_initial()["email"] == "test@example.com"
def test_signup_user_model_no_email(settings, client, password_factory, db, mailoutbox):
settings.ACCOUNT_USERNAME_REQUIRED = False
settings.ACCOUNT_EMAIL_REQUIRED = True
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.MANDATORY
settings.ACCOUNT_USER_MODEL_EMAIL_FIELD = None
password = password_factory()
email = "user@example.com"
resp = client.post(
reverse("account_signup"),
{
"email": email,
"password1": password,
"password2": password,
},
)
assert resp.status_code == HTTPStatus.FOUND
email = EmailAddress.objects.get(email=email)
assert email.primary
assert not email.verified
assert len(mailoutbox) == 1
def test_email_lower_case(db, settings):
settings.ACCOUNT_LOGIN_METHODS = {app_settings.LoginMethod.EMAIL}
settings.ACCOUNT_EMAIL_VERIFICATION = app_settings.EmailVerificationMethod.NONE
c = Client()
resp = c.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": "JoHn@DoE.oRg",
"password1": "johndoe",
"password2": "johndoe",
},
)
assert resp.status_code == HTTPStatus.FOUND
assert EmailAddress.objects.filter(email="john@doe.org").count() == 1
def test_does_not_create_user_when_honeypot_filled_out(client, db, settings):
settings.ACCOUNT_SIGNUP_FORM_HONEYPOT_FIELD = "phone_number"
resp = client.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": "john@example.com",
"password1": "Password1@",
"password2": "Password1@",
"phone_number": "5551231234",
},
)
assert not get_user_model().objects.all().exists()
assert resp.status_code == HTTPStatus.FOUND
def test_create_user_when_honeypot_not_filled_out(client, db, settings):
settings.ACCOUNT_SIGNUP_FORM_HONEYPOT_FIELD = "phone_number"
resp = client.post(
reverse("account_signup"),
{
"username": "johndoe",
"email": "john@example.com",
"password1": "Password1@",
"password2": "Password1@",
"phone_number": "",
},
)
assert get_user_model().objects.filter(username="johndoe").count() == 1
assert resp.status_code == HTTPStatus.FOUND
def test_signup_without_password(
db,
client,
email_factory,
settings_impacting_urls,
):
with settings_impacting_urls(
ACCOUNT_LOGIN_BY_CODE_ENABLED=True,
ACCOUNT_EMAIL_VERIFICATION="mandatory",
ACCOUNT_EMAIL_VERIFICATION_BY_CODE_ENABLED=True,
ACCOUNT_SIGNUP_FIELDS=["email*", "password1"],
):
email = email_factory()
resp = client.post(
reverse("account_signup"),
data={
"username": "wizard",
"email": email,
},
)
assert resp.status_code == HTTPStatus.FOUND
user = get_user_model().objects.get(email=email)
assert not user.check_password("")
| SignupTests |
python | pytorch__pytorch | torch/distributed/tensor/_op_schema.py | {
"start": 21037,
"end": 22427
} | class ____:
"""
OutputSharding is a data class that is used by the sharding propagation,
it could set the output_spec upon successful propagation. If needs_redistribute
is set to True, a redistribute_schema would be returned together to indicate
the input arguments needs to be redistributed before the op execution.
NOTE: the redistribute_schema generated by sharding propagation should be
exactly the same as the operator OpSchema, except the DTensorSpecs
"""
# specifies the output sharding pattern
output_spec: OutputSpecType
# schema for redistribution if needed
redistribute_schema: OpSchema | None = None
# flag indicating if inputs need redistribution
needs_redistribute: bool = False
# flag to use values from `redistribute_schema`
use_val_from_redistribute_schema: bool = False
@cached_property
def mesh(self):
if isinstance(self.output_spec, DTensorSpec):
return self.output_spec.mesh
elif isinstance(self.output_spec, tuple):
out_spec = self.output_spec[0]
if isinstance(out_spec, DTensorSpec):
return out_spec.mesh
else:
raise ValueError(f"Unknown output spec type: {type(out_spec)}")
else:
raise ValueError(f"Unknown output spec type: {type(self.output_spec)}")
@dataclass
| OutputSharding |
python | numba__numba | numba/tests/test_ctypes.py | {
"start": 1786,
"end": 7431
} | class ____(MemoryLeakMixin, TestCase):
def test_c_sin(self):
pyfunc = use_c_sin
cfunc = njit((types.double,))(pyfunc)
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_two_funcs(self):
# Check that two constant functions don't get mixed up.
pyfunc = use_two_funcs
cfunc = njit((types.double,))(pyfunc)
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
@unittest.skipUnless(is_windows, "Windows-specific test")
def test_stdcall(self):
# Just check that it doesn't crash
cfunc = njit((types.uintc,))(use_c_sleep)
cfunc(1)
def test_ctype_wrapping(self):
pyfunc = use_ctype_wrapping
cfunc = njit((types.double,))(pyfunc)
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_voidptr(self):
pyfunc = use_c_pointer
# pyfunc will segfault if called
cfunc = njit((types.int32,))(pyfunc)
x = 123
self.assertEqual(cfunc(x), x + 1)
def test_function_pointer(self):
pyfunc = use_func_pointer
cfunc = jit(nopython=True)(pyfunc)
for (fa, fb, x) in [
(c_sin, c_cos, 1.0),
(c_sin, c_cos, -1.0),
(c_cos, c_sin, 1.0),
(c_cos, c_sin, -1.0)]:
expected = pyfunc(fa, fb, x)
got = cfunc(fa, fb, x)
self.assertEqual(got, expected)
# A single specialization was compiled for all calls
self.assertEqual(len(cfunc.overloads), 1, cfunc.overloads)
def test_untyped_function(self):
with self.assertRaises(TypeError) as raises:
njit((types.double,))(use_c_untyped)
self.assertIn("ctypes function '_numba_test_exp' doesn't define its argument types",
str(raises.exception))
def test_python_call_back(self):
mydct = {'what': 1232121}
def call_me_maybe(arr):
return mydct[arr[0].decode('ascii')]
# Create a callback into the python interpreter
py_call_back = CFUNCTYPE(c_int, py_object)(call_me_maybe)
def pyfunc(a):
what = py_call_back(a)
return what
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.array(["what"], dtype='S10')
self.assertEqual(pyfunc(arr), cfunc(arr))
def test_python_call_back_threaded(self):
def pyfunc(a, repeat):
out = 0
for _ in range(repeat):
out += py_call_back(a)
return out
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.array(["what"], dtype='S10')
repeat = 1000
expected = pyfunc(arr, repeat)
outputs = []
# Warm up
cfunc(arr, repeat)
# Test the function in multiple threads to exercise the
# GIL ensure/release code
def run(func, arr, repeat):
outputs.append(func(arr, repeat))
threads = [threading.Thread(target=run, args=(cfunc, arr, repeat))
for _ in range(10)]
# Start threads
for th in threads:
th.start()
# End threads
for th in threads:
th.join()
# Check results
for got in outputs:
self.assertEqual(expected, got)
def test_passing_array_ctypes_data(self):
"""
Test the ".ctypes.data" attribute of an array can be passed
as a "void *" parameter.
"""
def pyfunc(arr):
return c_take_array_ptr(arr.ctypes.data)
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.arange(5)
expected = pyfunc(arr)
got = cfunc(arr)
self.assertEqual(expected, got)
def check_array_ctypes(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
arr = np.linspace(0, 10, 5)
expected = arr ** 2.0
got = cfunc(arr)
self.assertPreciseEqual(expected, got)
return cfunc
def test_passing_array_ctypes_voidptr(self):
"""
Test the ".ctypes" attribute of an array can be passed
as a "void *" parameter.
"""
self.check_array_ctypes(use_c_vsquare)
def test_passing_array_ctypes_voidptr_pass_ptr(self):
"""
Test the ".ctypes" attribute of an array can be passed
as a pointer parameter of the right type.
"""
cfunc = self.check_array_ctypes(use_c_vcube)
# Non-compatible pointers are not accepted (here float32* vs. float64*)
with self.assertRaises(errors.TypingError) as raises:
cfunc(np.float32([0.0]))
self.assertIn("No implementation of function ExternalFunctionPointer",
str(raises.exception))
def test_storing_voidptr_to_int_array(self):
# Make C callback that returns a void*
cproto = CFUNCTYPE(c_void_p)
@cproto
def get_voidstar():
return 0xdeadbeef
# Make python functions that use the C callback
def pyfunc(a):
ptr = get_voidstar()
a[0] = ptr
return ptr
# Compile it
cfunc = njit((types.uintp[::1],))(pyfunc)
# Setup inputs
arr_got = np.zeros(1, dtype=np.uintp)
arr_expect = arr_got.copy()
# Run functions
ret_got = cfunc(arr_got)
ret_expect = pyfunc(arr_expect)
# Check
self.assertEqual(ret_expect, 0xdeadbeef)
self.assertPreciseEqual(ret_got, ret_expect)
self.assertPreciseEqual(arr_got, arr_expect)
if __name__ == '__main__':
unittest.main()
| TestCTypesUseCases |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 29721,
"end": 29951
} | class ____(ast.NodeTransformer):
"""Throws an error when it sees a number."""
def visit_Constant(self, node):
if isinstance(node.value, int):
raise ValueError("test")
return node
| ErrorTransformer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 37251,
"end": 37596
} | class ____(sgqlc.types.Enum):
"""Properties by which mannequins can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order mannequins why when they were created.
* `LOGIN`: Order mannequins alphabetically by their source login.
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT", "LOGIN")
| MannequinOrderField |
python | doocs__leetcode | solution/0500-0599/0598.Range Addition II/Solution.py | {
"start": 0,
"end": 183
} | class ____:
def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
for a, b in ops:
m = min(m, a)
n = min(n, b)
return m * n
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1035200,
"end": 1035672
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateProjectCard"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_card")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_card = sgqlc.types.Field("ProjectCard", graphql_name="projectCard")
"""The updated ProjectCard."""
| UpdateProjectCardPayload |
python | pydantic__pydantic | tests/mypy/modules/no_strict_optional.py | {
"start": 152,
"end": 335
} | class ____(BaseModel):
model_config = ConfigDict(
validate_assignment=True,
validate_default=True,
extra='forbid',
frozen=True,
)
| CustomBaseModel |
python | walkccc__LeetCode | solutions/1916. Count Ways to Build Rooms in an Ant Colony/1916.py | {
"start": 0,
"end": 505
} | class ____:
def waysToBuildRooms(self, prevRoom: list[int]) -> int:
MOD = 1_000_000_007
graph = collections.defaultdict(list)
for i, prev in enumerate(prevRoom):
graph[prev].append(i)
def dfs(node: int) -> tuple[int, int]:
if not graph[node]:
return 1, 1
ans = 1
l = 0
for child in graph[node]:
temp, r = dfs(child)
ans = (ans * temp * math.comb(l + r, r)) % MOD
l += r
return ans, l + 1
return dfs(0)[0]
| Solution |
python | scipy__scipy | scipy/interpolate/_fitpack_repro.py | {
"start": 23127,
"end": 31173
} | class ____:
"""
Fit a smooth periodic B-spline curve to given data points.
This class fits a periodic B-spline curve S(t) of degree k through data points
(x, y) with knots t. The spline is smooth and repeats itself at the start and
end, meaning the function and its derivatives up to order k-1 are equal
at the boundaries.
We want to find spline coefficients c that minimize the difference between
the spline and the data, while also keeping the spline smooth. This is done
by solving:
minimize || W^{1/2} (Y - B c) ||^2 + s * c^T @ R @ c
subject to periodic constraints on c.
where:
- Y is the data values,
- B is the matrix of B-spline basis functions at points x,
- W is a weighting matrix for the data points,
- s is the smoothing parameter (larger s means smoother curve),
- R is a matrix that penalizes wiggliness of the spline,
- c spline coefficients to be solved for
- periodic constraints ensure the spline repeats smoothly.
The solution is obtained by forming augmented matrices and performing
a QR factorization that incorporates these constraints, following the
approach in FITPACK's `fpperi.f`.
Parameters:
-----------
x : array_like, shape (n,)
y : array_like, shape (n, m)
t : array_like, shape (nt,)
Knot vector for the spline
k : int
Degree of the spline.
s : float
Controls smoothness: bigger s means smoother curve, smaller s fits data closer.
w : array_like, shape (n,), optional
Weights for data points. Defaults to all ones.
R, Y, A1, A2, Z : arrays, optional
Precomputed matrices from least squares and QR factorization steps to speed up
repeated fits with the same knots and data.
Attributes:
-----------
G1, G2 : arrays
Augmented matrices combining the original QR factors and constraints related to
the spline basis and data. G1 is roughly the "upper-triangular" part;
G2 contains additional constraint information for periodicity.
H1, H2 : arrays
Matrices associated with the discontinuity jump constraints of the k-th
derivative of B-splines at the knots. These encode the periodicity
conditions and are scaled by the smoothing parameter.
offset : array
Offset indices used for efficient indexing during QR reduction.
Methods:
--------
__call__(p):
Perform QR reduction of augmented matrices scaled by 1/p, solve for spline
coefficients, and return residual difference fp - s.
References:
-----------
- FITPACK's fpperi.f and fpcurf.f Fortran routines for periodic spline fitting.
"""
def __init__(self, x, y, t, k, s, w=None, *,
R=None, Y=None, A1=None, A2=None, Z=None):
# Initialize the class with input data points x, y,
# knot vector t, spline degree k, smoothing factor s,
# optional weights w, and optionally precomputed matrices.
self.x = x
self.y = y
self.t = t
self.k = k
# If weights not provided, default to uniform weights (all ones)
w = np.ones_like(x, dtype=float) if w is None else w
if w.ndim != 1:
raise ValueError(f"{w.ndim = } != 1.")
self.w = w
self.s = s
if y.ndim != 2:
raise ValueError(f"F: expected y.ndim == 2, got {y.ndim = } instead.")
# ### precompute matrices and factors needed for spline fitting ###
# Compute the discontinuity jump vector 'b' for the k-th derivative
# of B-splines at internal knots. This is needed for enforcing
# periodicity constraints. The jump discontinuities are stored in b.
# Refer: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpcurf.f#L252
b, b_offset, b_nc = disc(t, k)
# If QR factorization or auxiliary matrices (A1, A2, Z) are not provided,
# compute them via least squares on the data (x,y) with weights w.
# These matrices come from fitting B-spline basis to data.
if ((A1 is None or A2 is None or Z is None) or
(R is None and Y is None)):
# _lsq_solve_qr_for_root_rati_periodic computes QR factorization of
# data matrix and returns related matrices.
# Refer: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L171-L215
R, A1, A2, Z, _, _, _, _ = _lsq_solve_qr_for_root_rati_periodic(
x, y, t, k, w)
# Initialize augmented matrices G1, G2, H1, H2 and offset used
# for periodic B-spline fitting with constraints.
# This calls the C++ function `init_augmented_matrices` to set
# up these matrices based on A1, A2, and discontinuity vector b.
# Refer: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L441-L493
G1, G2, H1, H2, offset = _dierckx.init_augmented_matrices(A1, A2, b, len(t), k)
# Store these matrices as class attributes for use in evaluation
self.G1_ = G1
self.G2_ = G2
self.H1_ = H1
self.H2_ = H2
self.Z_ = Z
self.offset_ = offset
def __call__(self, p):
# Create copies of the augmented matrices to avoid overwriting originals
G1 = self.G1_.copy()
G2 = self.G2_.copy()
H1 = self.H1_.copy()
H2 = self.H2_.copy()
Z = self.Z_.copy()
# Scale H1 and H2 by the inverse of p (1/p), applying the pinv multiplier
# This scaling is required before QR reduction step to control smoothing.
pinv = 1/p
H1 = H1 * pinv
H2 = H2 * pinv
# Initialize vector c for coefficients with shape compatible with matrices.
# The first part of c is set from Z, which is related to least squares fit.
c = np.empty((len(self.t) - self.k - 1, Z.shape[1]), dtype=Z.dtype)
c[:len(self.t) - 2*self.k - 1, :] = Z[:len(self.t) - 2*self.k - 1, :]
# Perform QR factorization reduction on the augmented matrices
# This corresponds to the C++ function qr_reduce_augmented_matrices.
# It applies Givens rotations to eliminate entries and
# reduces the problem dimension by accounting for constraints.
# Refer: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L498-L534
_dierckx.qr_reduce_augmented_matrices(
G1, G2, H1, H2, c, self.offset_, len(self.t), self.k)
# Solve for the B-spline coefficients by backward substitution
# using the reduced matrices. This corresponds to the fpbacp routine in fitpack.
c, _, fp = _dierckx.fpbacp(
G1, G2, c,
self.k, self.k + 1, self.x[:-1], self.y[:-1, :],
self.t, self.w[:-1])
# Construct a BSpline object using knot vector t, coefficients c, and degree k
spl = BSpline(self.t, c, self.k)
self.spl = spl # store it in the object
# Return the difference between the final residual fp and smoothing parameter s
# This quantity is used to assess fit quality in root-finding procedures.
return fp - self.s
def fprati(p1, f1, p2, f2, p3, f3):
"""The root of r(p) = (u*p + v) / (p + w) given three points and values,
(p1, f2), (p2, f2) and (p3, f3).
The FITPACK analog adjusts the bounds, and we do not
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fprati.f
NB: FITPACK uses p < 0 to encode p=infinity. We just use the infinity itself.
Since the bracket is ``p1 <= p2 <= p3``, ``p3`` can be infinite (in fact,
this is what the minimizer starts with, ``p3=inf``).
"""
h1 = f1 * (f2 - f3)
h2 = f2 * (f3 - f1)
h3 = f3 * (f1 - f2)
if p3 == np.inf:
return -(p2*h1 + p1*h2) / h3
return -(p1*p2*h3 + p2*p3*h1 + p1*p3*h2) / (p1*h1 + p2*h2 + p3*h3)
| Fperiodic |
python | scrapy__scrapy | tests/test_engine.py | {
"start": 3070,
"end": 3431
} | class ____(MySpider):
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
crawler.signals.connect(spider.spider_idle, signals.spider_idle)
return spider
def spider_idle(self):
raise CloseSpider(reason="custom_reason")
| ChangeCloseReasonSpider |
python | pydata__xarray | xarray/backends/h5netcdf_.py | {
"start": 14857,
"end": 22676
} | class ____(BackendEntrypoint):
"""
Backend for netCDF files based on the h5netcdf package.
It can open ".nc", ".nc4", ".cdf" files but will only be
selected as the default if the "netcdf4" engine is not available.
Additionally it can open valid HDF5 files, see
https://h5netcdf.org/#invalid-netcdf-files for more info.
It will not be detected as valid backend for such files, so make
sure to specify ``engine="h5netcdf"`` in ``open_dataset``.
For more information about the underlying library, visit:
https://h5netcdf.org
See Also
--------
backends.H5NetCDFStore
backends.NetCDF4BackendEntrypoint
backends.ScipyBackendEntrypoint
"""
description = (
"Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using h5netcdf in Xarray"
)
url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.H5netcdfBackendEntrypoint.html"
supports_groups = True
def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
from xarray.core.utils import is_remote_uri
filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
# Try to read magic number for local files only
is_remote = isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj)
if not is_remote:
magic_number = try_read_magic_number_from_file_or_path(filename_or_obj)
if magic_number is not None:
return magic_number.startswith(b"\211HDF\r\n\032\n")
if isinstance(filename_or_obj, str | os.PathLike):
_, ext = os.path.splitext(filename_or_obj)
return ext in {".nc", ".nc4", ".cdf"}
return False
def open_dataset(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
format="NETCDF4",
group=None,
lock=None,
invalid_netcdf=None,
phony_dims=None,
decode_vlen_strings=True,
driver=None,
driver_kwds=None,
storage_options: dict[str, Any] | None = None,
) -> Dataset:
# Keep this message for some versions
# remove and set phony_dims="access" above
emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims)
filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
store = H5NetCDFStore.open(
filename_or_obj,
format=format,
group=group,
lock=lock,
invalid_netcdf=invalid_netcdf,
phony_dims=phony_dims,
decode_vlen_strings=decode_vlen_strings,
driver=driver,
driver_kwds=driver_kwds,
storage_options=storage_options,
)
store_entrypoint = StoreBackendEntrypoint()
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
# only warn if phony_dims exist in file
# remove together with the above check
# after some versions
if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning:
_emit_phony_dims_warning()
return ds
def open_datatree(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
format="NETCDF4",
group: str | None = None,
lock=None,
invalid_netcdf=None,
phony_dims=None,
decode_vlen_strings=True,
driver=None,
driver_kwds=None,
**kwargs,
) -> DataTree:
groups_dict = self.open_groups_as_dict(
filename_or_obj,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
format=format,
group=group,
lock=lock,
invalid_netcdf=invalid_netcdf,
phony_dims=phony_dims,
decode_vlen_strings=decode_vlen_strings,
driver=driver,
driver_kwds=driver_kwds,
**kwargs,
)
return datatree_from_dict_with_io_cleanup(groups_dict)
def open_groups_as_dict(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
format="NETCDF4",
group: str | None = None,
lock=None,
invalid_netcdf=None,
phony_dims=None,
decode_vlen_strings=True,
driver=None,
driver_kwds=None,
**kwargs,
) -> dict[str, Dataset]:
from xarray.backends.common import _iter_nc_groups
from xarray.core.treenode import NodePath
from xarray.core.utils import close_on_error
# Keep this message for some versions
# remove and set phony_dims="access" above
emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims)
filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
store = H5NetCDFStore.open(
filename_or_obj,
format=format,
group=group,
lock=lock,
invalid_netcdf=invalid_netcdf,
phony_dims=phony_dims,
decode_vlen_strings=decode_vlen_strings,
driver=driver,
driver_kwds=driver_kwds,
)
# Check for a group and make it a parent if it exists
if group:
parent = NodePath("/") / NodePath(group)
else:
parent = NodePath("/")
manager = store._manager
groups_dict = {}
for path_group in _iter_nc_groups(store.ds, parent=parent):
group_store = H5NetCDFStore(manager, group=path_group, **kwargs)
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(group_store):
group_ds = store_entrypoint.open_dataset(
group_store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
if group:
group_name = str(NodePath(path_group).relative_to(parent))
else:
group_name = str(NodePath(path_group))
groups_dict[group_name] = group_ds
# only warn if phony_dims exist in file
# remove together with the above check
# after some versions
if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning:
_emit_phony_dims_warning()
return groups_dict
BACKEND_ENTRYPOINTS["h5netcdf"] = ("h5netcdf", H5netcdfBackendEntrypoint)
| H5netcdfBackendEntrypoint |
python | openai__openai-python | tests/lib/chat/test_completions_streaming.py | {
"start": 34595,
"end": 37823
} | class ____(Generic[ResponseFormatT]):
def __init__(self, stream: ChatCompletionStream[ResponseFormatT]) -> None:
self.stream = stream
self.events: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
for event in self.stream:
self.events.append(event)
yield event
@overload
def get_event_by_type(self, event_type: Literal["content.done"]) -> ContentDoneEvent[ResponseFormatT] | None: ...
@overload
def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None: ...
def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None:
return next((e for e in self.events if e.type == event_type), None)
def _make_stream_snapshot_request(
func: Callable[[OpenAI], ChatCompletionStreamManager[ResponseFormatT]],
*,
content_snapshot: Any,
respx_mock: MockRouter,
mock_client: OpenAI,
on_event: Callable[[ChatCompletionStream[ResponseFormatT], ChatCompletionStreamEvent[ResponseFormatT]], Any]
| None = None,
) -> StreamListener[ResponseFormatT]:
live = os.environ.get("OPENAI_LIVE") == "1"
if live:
def _on_response(response: httpx.Response) -> None:
# update the content snapshot
assert outsource(response.read()) == content_snapshot
respx_mock.stop()
client = OpenAI(
http_client=httpx.Client(
event_hooks={
"response": [_on_response],
}
)
)
else:
respx_mock.post("/chat/completions").mock(
return_value=httpx.Response(
200,
content=get_snapshot_value(content_snapshot),
headers={"content-type": "text/event-stream"},
)
)
client = mock_client
with func(client) as stream:
listener = StreamListener(stream)
for event in listener:
if on_event:
on_event(stream, event)
if live:
client.close()
return listener
def _make_raw_stream_snapshot_request(
func: Callable[[OpenAI], Iterator[ChatCompletionChunk]],
*,
content_snapshot: Any,
respx_mock: MockRouter,
mock_client: OpenAI,
) -> None:
live = os.environ.get("OPENAI_LIVE") == "1"
if live:
def _on_response(response: httpx.Response) -> None:
# update the content snapshot
assert outsource(response.read()) == content_snapshot
respx_mock.stop()
client = OpenAI(
http_client=httpx.Client(
event_hooks={
"response": [_on_response],
}
)
)
else:
respx_mock.post("/chat/completions").mock(
return_value=httpx.Response(
200,
content=get_snapshot_value(content_snapshot),
headers={"content-type": "text/event-stream"},
)
)
client = mock_client
stream = func(client)
consume_sync_iterator(stream)
if live:
client.close()
| StreamListener |
python | huggingface__transformers | src/transformers/models/seggpt/configuration_seggpt.py | {
"start": 783,
"end": 6492
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SegGptModel`]. It is used to instantiate a SegGPT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the SegGPT
[BAAI/seggpt-vit-large](https://huggingface.co/BAAI/seggpt-vit-large) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`list[int]`, *optional*, defaults to `[896, 448]`):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
mlp_dim (`int`, *optional*):
The dimensionality of the MLP layer in the Transformer encoder. If unset, defaults to
`hidden_size` * 4.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The drop path rate for the dropout layers.
pretrain_image_size (`int`, *optional*, defaults to 224):
The pretrained size of the absolute position embeddings.
decoder_hidden_size (`int`, *optional*, defaults to 64):
Hidden size for decoder.
use_relative_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether to use relative position embeddings in the attention layers.
merge_index (`int`, *optional*, defaults to 2):
The index of the encoder layer to merge the embeddings.
intermediate_hidden_state_indices (`list[int]`, *optional*, defaults to `[5, 11, 17, 23]`):
The indices of the encoder layers which we store as features for the decoder.
beta (`float`, *optional*, defaults to 0.01):
Regularization factor for SegGptLoss (smooth-l1 loss).
Example:
```python
>>> from transformers import SegGptConfig, SegGptModel
>>> # Initializing a SegGPT seggpt-vit-large style configuration
>>> configuration = SegGptConfig()
>>> # Initializing a model (with random weights) from the seggpt-vit-large style configuration
>>> model = SegGptModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "seggpt"
def __init__(
self,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
hidden_act="gelu",
hidden_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=[896, 448],
patch_size=16,
num_channels=3,
qkv_bias=True,
mlp_dim=None,
drop_path_rate=0.1,
pretrain_image_size=224,
decoder_hidden_size=64,
use_relative_position_embeddings=True,
merge_index=2,
intermediate_hidden_state_indices=[5, 11, 17, 23],
beta=0.01,
**kwargs,
):
super().__init__(**kwargs)
if merge_index > min(intermediate_hidden_state_indices):
raise ValueError(
f"Merge index must be less than the minimum encoder output index, but got {merge_index=} and {intermediate_hidden_state_indices=}"
)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.drop_path_rate = drop_path_rate
self.pretrain_image_size = pretrain_image_size
self.decoder_hidden_size = decoder_hidden_size
self.use_relative_position_embeddings = use_relative_position_embeddings
self.merge_index = merge_index
self.intermediate_hidden_state_indices = intermediate_hidden_state_indices
self.beta = beta
self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim
__all__ = ["SegGptConfig"]
| SegGptConfig |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 83983,
"end": 86198
} | class ____(Response):
"""
Response of projects.get_model_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "projects"
_action = "get_model_tags"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetModelTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| GetModelTagsResponse |
python | django-debug-toolbar__django-debug-toolbar | tests/test_utils.py | {
"start": 3916,
"end": 6323
} | class ____(unittest.TestCase):
"""Tests for the sanitize_and_sort_request_vars function."""
def test_dict_sanitization(self):
"""Test sanitization of a regular dictionary."""
test_dict = {
"username": "testuser",
"password": "secret123",
"api_key": "abc123",
}
result = sanitize_and_sort_request_vars(test_dict)
# Convert to dict for easier testing
result_dict = dict(result["list"])
self.assertEqual(result_dict["username"], "testuser")
self.assertEqual(result_dict["password"], "********************")
self.assertEqual(result_dict["api_key"], "********************")
def test_querydict_sanitization(self):
"""Test sanitization of a QueryDict."""
query_dict = QueryDict("username=testuser&password=secret123&api_key=abc123")
result = sanitize_and_sort_request_vars(query_dict)
# Convert to dict for easier testing
result_dict = dict(result["list"])
self.assertEqual(result_dict["username"], "testuser")
self.assertEqual(result_dict["password"], "********************")
self.assertEqual(result_dict["api_key"], "********************")
def test_non_sortable_dict_keys(self):
"""Test dictionary with keys that can't be sorted."""
test_dict = {
1: "one",
"2": "two",
None: "none",
}
result = sanitize_and_sort_request_vars(test_dict)
self.assertEqual(len(result["list"]), 3)
result_dict = dict(result["list"])
self.assertEqual(result_dict[1], "one")
self.assertEqual(result_dict["2"], "two")
self.assertEqual(result_dict[None], "none")
def test_querydict_multiple_values(self):
"""Test QueryDict with multiple values for the same key."""
query_dict = QueryDict("name=bar1&name=bar2&title=value")
result = sanitize_and_sort_request_vars(query_dict)
result_dict = dict(result["list"])
self.assertEqual(result_dict["name"], ["bar1", "bar2"])
self.assertEqual(result_dict["title"], "value")
def test_non_dict_input(self):
"""Test handling of non-dict input."""
test_input = ["not", "a", "dict"]
result = sanitize_and_sort_request_vars(test_input)
self.assertEqual(result["raw"], test_input)
| SanitizeAndSortRequestVarsTestCase |
python | pyca__cryptography | src/cryptography/hazmat/decrepit/ciphers/modes.py | {
"start": 1222,
"end": 1649
} | class ____(ModeWithInitializationVector):
name = "CFB8"
def __init__(self, initialization_vector: utils.Buffer):
utils._check_byteslike("initialization_vector", initialization_vector)
self._initialization_vector = initialization_vector
@property
def initialization_vector(self) -> utils.Buffer:
return self._initialization_vector
validate_for_algorithm = _check_iv_and_key_length
| CFB8 |
python | huggingface__transformers | src/transformers/models/unispeech/modeling_unispeech.py | {
"start": 16047,
"end": 17418
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = UniSpeechAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = UniSpeechFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| UniSpeechEncoderLayer |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 3733,
"end": 3994
} | class ____(TypedDict, total=False):
text: Required[str]
"""The text output from the model."""
type: Required[Literal["output_text"]]
"""The type of the output text. Always `output_text`."""
| TestingCriterionLabelModelInputEvalItemContentOutputText |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-ideal-arrays.py | {
"start": 2173,
"end": 3235
} | class ____(object):
def idealArrays(self, n, maxValue):
"""
:type n: int
:type maxValue: int
:rtype: int
"""
MOD = 10**9+7
fact, inv, inv_fact = [[1]*2 for _ in xrange(3)]
def nCr(n, k):
while len(inv) <= n: # lazy initialization
fact.append(fact[-1]*len(inv) % MOD)
inv.append(inv[MOD%len(inv)]*(MOD-MOD//len(inv)) % MOD) # https://cp-algorithms.com/algebra/module-inverse.html
inv_fact.append(inv_fact[-1]*inv[-1] % MOD)
return (fact[n]*inv_fact[n-k] % MOD) * inv_fact[k] % MOD
result = 0
dp = collections.Counter(xrange(1, maxValue+1))
for i in xrange(n):
new_dp = collections.Counter()
total = 0
for x, c in dp.iteritems():
total = (total+c)%MOD
for y in xrange(x+x, maxValue+1, x):
new_dp[y] += c
result = (result+total*nCr(n-1, i))%MOD
dp = new_dp
return result
| Solution2 |
python | pydantic__pydantic | tests/mypy/outputs/mypy-default_ini/metaclass_args.py | {
"start": 499,
"end": 740
} | class ____(BaseModel, validate_by_name=True):
i: int = Field(2, alias='j')
MetaclassArgumentsWithDefault(i=None)
# MYPY: error: Unexpected keyword argument "i" for "MetaclassArgumentsWithDefault" [call-arg]
| MetaclassArgumentsWithDefault |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 93870,
"end": 97253
} | class ____:
"""represent an entity column returned within a Query result."""
__slots__ = ()
supports_single_entity: bool
_non_hashable_value = False
_null_column_type = False
use_id_for_hash = False
_label_name: Optional[str]
type: Union[Type[Any], TypeEngine[Any]]
expr: Union[_InternalEntityType, ColumnElement[Any]]
entity_zero: Optional[_InternalEntityType]
def setup_compile_state(self, compile_state: _ORMCompileState) -> None:
raise NotImplementedError()
def setup_dml_returning_compile_state(
self,
compile_state: _ORMCompileState,
adapter: Optional[_DMLReturningColFilter],
) -> None:
raise NotImplementedError()
def row_processor(self, context, result):
raise NotImplementedError()
@classmethod
def to_compile_state(
cls, compile_state, entities, entities_collection, is_current_entities
):
for idx, entity in enumerate(entities):
if entity._is_lambda_element:
if entity._is_sequence:
cls.to_compile_state(
compile_state,
entity._resolved,
entities_collection,
is_current_entities,
)
continue
else:
entity = entity._resolved
if entity.is_clause_element:
if entity.is_selectable:
if "parententity" in entity._annotations:
_MapperEntity(
compile_state,
entity,
entities_collection,
is_current_entities,
)
else:
_ColumnEntity._for_columns(
compile_state,
entity._select_iterable,
entities_collection,
idx,
is_current_entities,
)
else:
if entity._annotations.get("bundle", False):
_BundleEntity(
compile_state,
entity,
entities_collection,
is_current_entities,
)
elif entity._is_clause_list:
# this is legacy only - test_composites.py
# test_query_cols_legacy
_ColumnEntity._for_columns(
compile_state,
entity._select_iterable,
entities_collection,
idx,
is_current_entities,
)
else:
_ColumnEntity._for_columns(
compile_state,
[entity],
entities_collection,
idx,
is_current_entities,
)
elif entity.is_bundle:
_BundleEntity(compile_state, entity, entities_collection)
return entities_collection
| _QueryEntity |
python | sympy__sympy | sympy/codegen/cfunctions.py | {
"start": 7439,
"end": 8539
} | class ____(Function):
"""
Represents the logarithm function with base ten.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import log10
>>> log10(100).evalf() == 2.0
True
>>> log10(x).diff(x)
1/(x*log(10))
See Also
========
log2
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return S.One/(log(_Ten)*self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_number:
result = log.eval(arg, base=_Ten)
if result.is_Atom:
return result
elif arg.is_Pow and arg.base == _Ten:
return arg.exp
def _eval_expand_func(self, **hints):
return _log10(*self.args)
def _eval_rewrite_as_log(self, arg, **kwargs):
return _log10(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _Sqrt(x):
return Pow(x, S.Half)
| log10 |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_bitbucket_server.py | {
"start": 704,
"end": 2396
} | class ____(TestCase):
get_response = MagicMock(return_value=HttpResponse(content=b"no-error", status=200))
factory = RequestFactory()
region = Region("us", 1, "https://us.testserver", RegionCategory.MULTI_TENANT)
region_config = (region,)
@override_regions(region_config)
@override_settings(SILO_MODE=SiloMode.CONTROL)
def test_routing_webhook(self) -> None:
region_route = reverse(
"sentry-extensions-bitbucketserver-webhook",
kwargs={"organization_id": self.organization.id, "integration_id": self.integration.id},
)
with outbox_runner():
request = self.factory.post(region_route)
parser = BitbucketServerRequestParser(request=request, response_handler=self.get_response)
# Missing region
OrganizationMapping.objects.get(organization_id=self.organization.id).update(
region_name="eu"
)
with mock.patch.object(
parser, "get_response_from_control_silo"
) as get_response_from_control_silo:
parser.get_response()
assert get_response_from_control_silo.called
# Valid region
OrganizationMapping.objects.get(organization_id=self.organization.id).update(
region_name="us"
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 202
assert response.content == b""
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"bitbucket_server:{self.organization.id}",
region_names=[self.region.name],
)
| BitbucketServerRequestParserTest |
python | pytorch__pytorch | test/test_sympy_utils.py | {
"start": 6188,
"end": 13252
} | class ____(TestCase):
@parametrize("fn", UNARY_OPS)
@parametrize("dtype", ("int", "float"))
def test_unary_ref(self, fn, dtype):
dtype = {"int": sympy.Integer, "float": sympy.Float}[dtype]
for v in CONSTANTS:
if not valid_unary(fn, v):
continue
with self.subTest(v=v):
v = dtype(v)
ref_r = getattr(ReferenceAnalysis, fn)(v)
r = getattr(ValueRangeAnalysis, fn)(v)
self.assertEqual(r.lower.is_integer, r.upper.is_integer)
self.assertEqual(r.lower, r.upper)
self.assertEqual(ref_r.is_integer, r.upper.is_integer)
self.assertEqual(ref_r, r.lower)
def test_pow_half(self):
ValueRangeAnalysis.pow(ValueRanges.unknown(), ValueRanges.wrap(0.5))
@parametrize("fn", BINARY_OPS)
@parametrize("dtype", ("int", "float"))
def test_binary_ref(self, fn, dtype):
to_dtype = {"int": sympy.Integer, "float": sympy.Float}
# Don't test bitwise methods since value range analysis on a singleton
# range may not return a singleton result.
if fn in BITWISE_OPS:
return
# Don't test float on int only methods
if dtype == "float" and fn in ["pow_by_natural", "mod"]:
return
dtype = to_dtype[dtype]
for a, b in itertools.product(CONSTANTS, repeat=2):
if not valid_binary(fn, a, b):
continue
a = dtype(a)
b = dtype(b)
with self.subTest(a=a, b=b):
r = getattr(ValueRangeAnalysis, fn)(a, b)
if r == ValueRanges.unknown():
continue
ref_r = getattr(ReferenceAnalysis, fn)(a, b)
self.assertEqual(r.lower.is_integer, r.upper.is_integer)
self.assertEqual(ref_r.is_integer, r.upper.is_integer)
self.assertEqual(r.lower, r.upper)
self.assertEqual(ref_r, r.lower)
def test_mul_zero_unknown(self):
self.assertEqual(
ValueRangeAnalysis.mul(ValueRanges.wrap(0), ValueRanges.unknown()),
ValueRanges.wrap(0),
)
self.assertEqual(
ValueRangeAnalysis.mul(ValueRanges.wrap(0.0), ValueRanges.unknown()),
ValueRanges.wrap(0.0),
)
@parametrize("fn", UNARY_BOOL_OPS)
def test_unary_bool_ref_range(self, fn):
vals = [sympy.false, sympy.true]
for a in generate_range(vals):
with self.subTest(a=a):
ref_r = getattr(ValueRangeAnalysis, fn)(a)
unique = set()
for a0 in vals:
if a0 not in a:
continue
with self.subTest(a0=a0):
r = getattr(ReferenceAnalysis, fn)(a0)
self.assertIn(r, ref_r)
unique.add(r)
if ref_r.lower == ref_r.upper:
self.assertEqual(len(unique), 1)
else:
self.assertEqual(len(unique), 2)
@parametrize("fn", BINARY_BOOL_OPS + BITWISE_OPS)
def test_binary_bool_ref_range(self, fn):
vals = [sympy.false, sympy.true]
for a, b in itertools.product(generate_range(vals), repeat=2):
with self.subTest(a=a, b=b):
ref_r = getattr(ValueRangeAnalysis, fn)(a, b)
unique = set()
for a0, b0 in itertools.product(vals, repeat=2):
if a0 not in a or b0 not in b:
continue
with self.subTest(a0=a0, b0=b0):
r = getattr(ReferenceAnalysis, fn)(a0, b0)
self.assertIn(r, ref_r)
unique.add(r)
if ref_r.lower == ref_r.upper:
self.assertEqual(len(unique), 1)
else:
self.assertEqual(len(unique), 2)
@parametrize("fn", UNARY_OPS)
def test_unary_ref_range(self, fn):
# TODO: bring back sympy.oo testing for float unary fns
vals = CONSTANTS
for a in generate_range(vals):
with self.subTest(a=a):
ref_r = getattr(ValueRangeAnalysis, fn)(a)
for a0 in CONSTANTS:
if a0 not in a:
continue
if not valid_unary(fn, a0):
continue
with self.subTest(a0=a0):
r = getattr(ReferenceAnalysis, fn)(sympy.Integer(a0))
self.assertIn(r, ref_r)
# This takes about 4s for all the variants
@parametrize("fn", BINARY_OPS + COMPARE_OPS)
def test_binary_ref_range(self, fn):
# TODO: bring back sympy.oo testing for float unary fns
vals = LESS_CONSTANTS
for a, b in itertools.product(generate_range(vals), repeat=2):
# don't attempt pow on exponents that are too large (but oo is OK)
if fn == "pow" and b.upper > 4 and b.upper != sympy.oo:
continue
with self.subTest(a=a, b=b):
for a0, b0 in itertools.product(LESS_CONSTANTS, repeat=2):
if a0 not in a or b0 not in b:
continue
if not valid_binary(fn, a0, b0):
continue
with self.subTest(a0=a0, b0=b0):
ref_r = getattr(ValueRangeAnalysis, fn)(a, b)
r = getattr(ReferenceAnalysis, fn)(
sympy.Integer(a0), sympy.Integer(b0)
)
if r.is_finite:
self.assertIn(r, ref_r)
# stronger test specially for bitwise ops
@parametrize("fn", BITWISE_OPS)
def test_bitwise_ref_range(self, fn):
# N^4 complexity
vals = range(-4, 5)
for a, b in itertools.product(generate_range(vals), repeat=2):
with self.subTest(a=a, b=b):
for a0, b0 in itertools.product(vals, repeat=2):
if a0 not in a or b0 not in b:
continue
with self.subTest(a0=a0, b0=b0):
ref_r = getattr(ValueRangeAnalysis, fn)(a, b)
r = getattr(ReferenceAnalysis, fn)(a0, b0)
self.assertIn(r, ref_r)
# test that bitwise ops can take bool arguments
bool_vals = [
(3, sympy.true),
(3, sympy.false),
(sympy.true, 3),
(sympy.false, 3),
(sympy.true, sympy.true),
(sympy.true, sympy.false),
(sympy.false, sympy.true),
(sympy.false, sympy.false),
]
for a, b in bool_vals:
with self.subTest(a=a, b=b):
ref_r = getattr(ValueRangeAnalysis, fn)(a, b)
r = getattr(ReferenceAnalysis, fn)(a, b)
self.assertIn(r, ref_r)
| TestValueRanges |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_decorators_1.py | {
"start": 540,
"end": 570
} | class ____:
x: numpy.ndarray
| F |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_emails_confirm.py | {
"start": 635,
"end": 858
} | class ____(Response):
def __init__(self) -> None:
super().__init__(
{"detail": "Invalid email", "email": "Invalid email"},
status=status.HTTP_400_BAD_REQUEST,
)
| InvalidEmailResponse |
python | astropy__astropy | astropy/cosmology/_src/traits/rhocrit.py | {
"start": 274,
"end": 1130
} | class ____:
"""The object has attributes and methods for the critical density."""
critical_density0: Quantity
"""Critical density at redshift 0."""
efunc: Callable[[Any], NDArray[Any]]
def critical_density(self, z: Quantity | ArrayLike, /) -> Quantity:
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
rho : Quantity ['mass density']
Critical density at each input redshift.
"""
return self.critical_density0 * self.efunc(z) ** 2
| CriticalDensity |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 19185,
"end": 20312
} | class ____(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
| TestEigvals |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py | {
"start": 46939,
"end": 53282
} | class ____(GoogleCloudBaseOperator):
"""
Perform DML or DDL query on an existing Cloud Sql instance.
It optionally uses cloud-sql-proxy to establish secure connection with the
database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLExecuteQueryOperator`
:param sql: SQL query or list of queries to run (should be DML or DDL query -
this operator does not return any data from the database,
so it is useless to pass it DQL queries. Note that it is responsibility of the
author of the queries to make sure that the queries are idempotent. For example
you can use CREATE TABLE IF NOT EXISTS to create a table.
:param parameters: (optional) the parameters to render the SQL query with.
:param autocommit: if True, each command is automatically committed.
(default value: False)
:param gcp_conn_id: The connection ID used to connect to Google Cloud for
cloud-sql-proxy authentication.
:param gcp_cloudsql_conn_id: The connection ID used to connect to Google Cloud SQL
its schema should be gcpcloudsql://.
See :class:`~airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLDatabaseHook` for
details on how to define ``gcpcloudsql://`` connection.
:param sql_proxy_binary_path: (optional) Path to the cloud-sql-proxy binary.
is not specified or the binary is not present, it is automatically downloaded.
:param ssl_cert: (optional) Path to client certificate to authenticate when SSL is used. Overrides the
connection field ``sslcert``.
:param ssl_key: (optional) Path to client private key to authenticate when SSL is used. Overrides the
connection field ``sslkey``.
:param ssl_root_cert: (optional) Path to server's certificate to authenticate when SSL is used. Overrides
the connection field ``sslrootcert``.
:param ssl_secret_id: (optional) ID of the secret in Google Cloud Secret Manager that stores SSL
certificate in the format below:
{'sslcert': '',
'sslkey': '',
'sslrootcert': ''}
Overrides the connection fields ``sslcert``, ``sslkey``, ``sslrootcert``.
Note that according to the Secret Manager requirements, the mentioned dict should be saved as a
string, and encoded with base64.
Note that this parameter is incompatible with parameters ``ssl_cert``, ``ssl_key``, ``ssl_root_cert``.
"""
# [START gcp_sql_query_template_fields]
template_fields: Sequence[str] = (
"sql",
"gcp_cloudsql_conn_id",
"gcp_conn_id",
"ssl_server_cert",
"ssl_client_cert",
"ssl_client_key",
"ssl_secret_id",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
# [END gcp_sql_query_template_fields]
ui_color = "#D3DEF1"
def __init__(
self,
*,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping[str, Any] | None = None,
gcp_conn_id: str = "google_cloud_default",
gcp_cloudsql_conn_id: str = "google_cloud_sql_default",
sql_proxy_binary_path: str | None = None,
ssl_server_cert: str | None = None,
ssl_client_cert: str | None = None,
ssl_client_key: str | None = None,
ssl_secret_id: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.gcp_conn_id = gcp_conn_id
self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
self.autocommit = autocommit
self.parameters = parameters
self.gcp_connection: Connection | None = None
self.sql_proxy_binary_path = sql_proxy_binary_path
self.ssl_server_cert = ssl_server_cert
self.ssl_client_cert = ssl_client_cert
self.ssl_client_key = ssl_client_key
self.ssl_secret_id = ssl_secret_id
@contextmanager
def cloud_sql_proxy_context(self, hook: CloudSQLDatabaseHook):
cloud_sql_proxy_runner = None
try:
if hook.use_proxy:
cloud_sql_proxy_runner = hook.get_sqlproxy_runner()
hook.free_reserved_port()
# There is very, very slim chance that the socket will
# be taken over here by another bind(0).
# It's quite unlikely to happen though!
cloud_sql_proxy_runner.start_proxy()
yield
finally:
if cloud_sql_proxy_runner:
cloud_sql_proxy_runner.stop_proxy()
def execute(self, context: Context):
hook = self.hook
hook.validate_ssl_certs()
connection = hook.create_connection()
hook.validate_socket_path_length()
database_hook = hook.get_database_hook(connection=connection)
try:
with self.cloud_sql_proxy_context(hook):
self.log.info('Executing: "%s"', self.sql)
database_hook.run(self.sql, self.autocommit, parameters=self.parameters)
finally:
hook.cleanup_database_hook()
@cached_property
def hook(self):
self.gcp_connection = BaseHook.get_connection(self.gcp_conn_id)
return CloudSQLDatabaseHook(
gcp_cloudsql_conn_id=self.gcp_cloudsql_conn_id,
gcp_conn_id=self.gcp_conn_id,
default_gcp_project_id=get_field(self.gcp_connection.extra_dejson, "project"),
sql_proxy_binary_path=self.sql_proxy_binary_path,
ssl_root_cert=self.ssl_server_cert,
ssl_cert=self.ssl_client_cert,
ssl_key=self.ssl_client_key,
ssl_secret_id=self.ssl_secret_id,
)
def get_openlineage_facets_on_complete(self, _) -> OperatorLineage | None:
from airflow.providers.common.compat.openlineage.utils.sql import get_openlineage_facets_with_sql
with self.cloud_sql_proxy_context(self.hook):
return get_openlineage_facets_with_sql(
hook=self.hook.db_hook,
sql=self.sql, # type:ignore[arg-type] # Iterable[str] instead of list[str]
conn_id=self.gcp_cloudsql_conn_id,
database=self.hook.database,
)
| CloudSQLExecuteQueryOperator |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/_error_x.py | {
"start": 233,
"end": 14881
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.error_x"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"copy_zstyle",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def copy_zstyle(self):
"""
The 'copy_zstyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_zstyle"]
@copy_zstyle.setter
def copy_zstyle(self, val):
self["copy_zstyle"] = val
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both direction (top/bottom for vertical bars, left/right for
horizontal bars.
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def traceref(self):
"""
The 'traceref' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
copy_zstyle
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_zstyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs,
):
"""
Construct a new ErrorX object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.ErrorX`
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
copy_zstyle
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorX
"""
super().__init__("error_x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.ErrorX
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.ErrorX`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("array", arg, array)
self._set_property("arrayminus", arg, arrayminus)
self._set_property("arrayminussrc", arg, arrayminussrc)
self._set_property("arraysrc", arg, arraysrc)
self._set_property("color", arg, color)
self._set_property("copy_zstyle", arg, copy_zstyle)
self._set_property("symmetric", arg, symmetric)
self._set_property("thickness", arg, thickness)
self._set_property("traceref", arg, traceref)
self._set_property("tracerefminus", arg, tracerefminus)
self._set_property("type", arg, type)
self._set_property("value", arg, value)
self._set_property("valueminus", arg, valueminus)
self._set_property("visible", arg, visible)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ErrorX |
python | spyder-ide__spyder | spyder/plugins/debugger/widgets/main_widget.py | {
"start": 2001,
"end": 2164
} | class ____:
Control = 'control_section'
InteractWithConsole = "interact_with_console_section"
Extras = "extras_section"
| DebuggerWidgetMainToolBarSections |
python | python-openxml__python-docx | tests/oxml/unitdata/shared.py | {
"start": 102,
"end": 392
} | class ____(BaseBuilder):
__nspfxs__ = ("w",)
__attrs__ = "w:val"
def __init__(self, tag):
self.__tag__ = tag
super(CT_OnOffBuilder, self).__init__()
def with_val(self, value):
self._set_xmlattr("w:val", str(value))
return self
| CT_OnOffBuilder |
python | ipython__ipython | IPython/core/magic_arguments.py | {
"start": 7066,
"end": 7531
} | class ____:
""" Base class for decorators to add ArgumentParser information to a method.
"""
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
func.decorators.append(self)
return func
def add_to_parser(self, parser, group):
""" Add this object's information to the parser, if necessary.
"""
pass
| ArgDecorator |
python | astropy__astropy | astropy/coordinates/representation/base.py | {
"start": 4518,
"end": 22462
} | class ____(MaskableShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__: Final = 50000
# Have to define this default b/c init_subclass is not called for the base class.
name: ClassVar[str] = "base"
"""Name of the representation or differential.
When a subclass is defined, by default, the name is the lower-cased name of the
class with with any trailing 'representation' or 'differential' removed. (E.g.,
'spherical' for `~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
This can be customized when defining a subclass by setting the class attribute.
"""
info = BaseRepresentationOrDifferentialInfo()
def __init_subclass__(cls) -> None:
# Name of the representation or differential
if "name" not in cls.__dict__:
cls.name = (
cls.__name__.lower()
.removesuffix("representation")
.removesuffix("differential")
)
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (
args
and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])
):
rep_or_diff = args[0]
copy = kwargs.pop("copy", True)
attrs = [getattr(rep_or_diff, component) for component in components]
if "info" in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(
"unexpected keyword arguments for case "
f"where class instance is passed in: {kwargs}"
)
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(
"__init__() missing 1 required positional "
f"argument: {component!r}"
) from None
if attr is None:
raise TypeError(
"__init__() missing 1 required positional argument:"
f" {component!r} (or first argument should be an instance of"
f" {self.__class__.__name__})."
)
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop("copy", True)
if args:
raise TypeError(f"unexpected arguments: {args}")
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(
f"__init__() got multiple values for argument {component!r}"
)
raise TypeError(f"unexpected keyword arguments: {kwargs}")
# Pass attributes through the required initializing classes.
attrs = [
self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)
]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = " and ".join(components)
else:
c_str = ", ".join(components[:2]) + ", and " + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [
(bc_attr.copy() if copy else bc_attr)
if bc_attr.shape != attr.shape
else attr
for attr, bc_attr in zip(attrs, bc_attrs)
]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, "_" + component, attr)
# If any attribute has a mask, ensure all attributes are Masked.
if any(hasattr(attr, "mask") for attr in attrs):
self._ensure_masked()
@deprecated("v7.1", alternative="name")
@classmethod
def get_name(cls):
"""Name of the representation or differential.
Returns the ``.name`` attribute.
"""
return cls.name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `~astropy.coordinates.CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `~astropy.coordinates.CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator.
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(
"cannot compare: objects must have same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f"cannot compare: {exc}") from exc
out = True
for comp in self.components:
out &= getattr(self, "_" + comp) == getattr(value, "_" + comp)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, "_" + component, apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
set_mask = value is np.ma.masked
clear_mask = value is np.ma.nomask
if not (value.__class__ is self.__class__ or set_mask or clear_mask):
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
" (unless setting or clearing the mask with"
" np.ma.masked or np.ma.nomask)."
)
if not self.masked:
if clear_mask:
# Clearing masked elements on an unmasked instance: nothing to do.
return
# Ensure our components are masked if a mask needs to be set.
# NOTE: we could also make ourselves masked if value.masked.
# But then we have to be sure that Time does the same, and live
# with the inconsistency that things like ndarray and Quantity cannot
# become masked when setting an item with a masked value. See
# https://github.com/astropy/astropy/pull/17016#issuecomment-2439607869
if set_mask:
self._ensure_masked()
if set_mask or clear_mask:
for comp in self.components:
c = "_" + comp
getattr(self, c).mask[item] = set_mask
return
for component in self.components:
c = "_" + component
getattr(self, c)[item] = getattr(value, c)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
@property
def masked(self):
return isinstance(getattr(self, self.components[0]), Masked)
def _ensure_masked(self):
"""Ensure Masked components."""
# TODO: should we just allow the above property to be set?
# But be sure the API remains consistent with Time!
if not self.masked:
for comp in self.components:
c = "_" + comp
setattr(self, c, Masked(getattr(self, c)))
def get_mask(self, *attrs):
"""Calculate the mask, by combining masks from the given attributes.
Parameters
----------
*attrs : str
Attributes from which to get the masks to combine. If not given,
use all components of the class.
Returns
-------
mask : ~numpy.ndarray of bool
The combined, read-only mask. If the instance is not masked, it
is an array of `False` with the correct shape.
"""
if not attrs:
attrs = self.components
values = operator.attrgetter(*attrs)(self)
if not isinstance(values, tuple):
values = (values,)
mask = combine_masks([getattr(v, "mask", None) for v in values])
return np.broadcast_to(mask, self.shape) # Makes it readonly too.
mask = property(get_mask, doc="The combined mask of all components.")
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty_like(
coo_items[0][1].value, dtype=[(c, coo.dtype) for c, coo in coo_items]
)
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units = self._units.values()
if len(units_set := set(units)) == 1:
return str(units_set.pop())
return f"({', '.join(map(str, units))})"
def __str__(self):
return f"{np.array2string(self._values, separator=', ')} {self._unitstr:s}"
def __repr__(self):
prefixstr = " "
arrstr = np.array2string(self._values, prefix=prefixstr, separator=", ")
diffstr = ""
if diffs := getattr(self, "differentials", None):
diffstr = f"\n (has differentials w.r.t.: {', '.join(map(repr, diffs))})"
unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
return (
f"<{self.__class__.__name__} ({', '.join(self.components)})"
f" {unitstr:s}\n{prefixstr}{arrstr}{diffstr}>"
)
| BaseRepresentationOrDifferential |
python | run-llama__llama_index | llama-index-core/llama_index/core/types.py | {
"start": 1041,
"end": 3250
} | class ____(DispatcherSpanMixin, ABC):
"""Output parser class."""
@abstractmethod
def parse(self, output: str) -> Any:
"""Parse, validate, and correct errors programmatically."""
def format(self, query: str) -> str:
"""Format a query with structured output formatting instructions."""
return query
def _format_message(self, message: ChatMessage) -> ChatMessage:
text_blocks: list[tuple[int, TextBlock]] = [
(idx, block)
for idx, block in enumerate(message.blocks)
if isinstance(block, TextBlock)
]
# add text to the last text block, or add a new text block
format_text = ""
if text_blocks:
format_idx = text_blocks[-1][0]
format_text = text_blocks[-1][1].text
if format_idx != -1:
# this should always be a text block
assert isinstance(message.blocks[format_idx], TextBlock)
message.blocks[format_idx].text = self.format(format_text) # type: ignore
else:
message.blocks.append(TextBlock(text=self.format(format_text)))
return message
def format_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Format a list of messages with structured output formatting instructions."""
# NOTE: apply output parser to either the first message if it's a system message
# or the last message
if messages:
if messages[0].role == MessageRole.SYSTEM:
# get text from the last text blocks
messages[0] = self._format_message(messages[0])
else:
messages[-1] = self._format_message(messages[-1])
return messages
@classmethod
def __get_pydantic_core_schema__(
cls, source: Type[Any], handler: GetCoreSchemaHandler
) -> CoreSchema:
return core_schema.any_schema()
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> Dict[str, Any]:
json_schema = handler(core_schema)
return handler.resolve_ref_schema(json_schema)
| BaseOutputParser |
python | pypa__setuptools | setuptools/_vendor/typing_extensions.py | {
"start": 4660,
"end": 53727
} | class ____(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
Final = typing.Final
if sys.version_info >= (3, 11):
final = typing.final
else:
# @final exists in 3.8+, but we backport it for all versions
# before 3.11 to keep support for the __final__ attribute.
# See https://bugs.python.org/issue46342
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
def IntVar(name):
return typing.TypeVar(name)
# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8
if sys.version_info >= (3, 10, 1):
    Literal = typing.Literal
else:
    def _flatten_literal_params(parameters):
        """An internal helper for Literal creation: flatten Literals among parameters"""
        params = []
        for p in parameters:
            # Nested Literal[...] aliases contribute their own args directly,
            # so Literal[Literal[1, 2], 3] == Literal[1, 2, 3].
            if isinstance(p, _LiteralGenericAlias):
                params.extend(p.__args__)
            else:
                params.append(p)
        return tuple(params)

    def _value_and_type_iter(params):
        # Yield (value, type) pairs so that e.g. 0 and False — which compare
        # equal — are still treated as distinct Literal parameters.
        for p in params:
            yield p, type(p)

    class _LiteralGenericAlias(typing._GenericAlias, _root=True):
        # Equality/hashing compare parameters as (value, type) pairs,
        # order-insensitively, matching the fixed stdlib behaviour.
        def __eq__(self, other):
            if not isinstance(other, _LiteralGenericAlias):
                return NotImplemented
            these_args_deduped = set(_value_and_type_iter(self.__args__))
            other_args_deduped = set(_value_and_type_iter(other.__args__))
            return these_args_deduped == other_args_deduped

        def __hash__(self):
            return hash(frozenset(_value_and_type_iter(self.__args__)))

    class _LiteralForm(_ExtensionsSpecialForm, _root=True):
        def __init__(self, doc: str):
            self._name = 'Literal'
            self._doc = self.__doc__ = doc

        def __getitem__(self, parameters):
            if not isinstance(parameters, tuple):
                parameters = (parameters,)

            parameters = _flatten_literal_params(parameters)

            # De-duplicate parameters while preserving first-seen order.
            val_type_pairs = list(_value_and_type_iter(parameters))
            try:
                deduped_pairs = set(val_type_pairs)
            except TypeError:
                # unhashable parameters
                pass
            else:
                # similar logic to typing._deduplicate on Python 3.9+
                if len(deduped_pairs) < len(val_type_pairs):
                    new_parameters = []
                    for pair in val_type_pairs:
                        if pair in deduped_pairs:
                            new_parameters.append(pair[0])
                            deduped_pairs.remove(pair)
                    assert not deduped_pairs, deduped_pairs
                    parameters = tuple(new_parameters)
            return _LiteralGenericAlias(self, parameters)

    Literal = _LiteralForm(doc="""\
                           A type that can be used to indicate to type checkers
                           that the corresponding value has a value literally equivalent
                           to the provided parameter. For example:

                               var: Literal[4] = 4

                           The type checker understands that 'var' is literally equal to
                           the value 4 and no other value.

                           Literal[...] cannot be subclassed. There is no runtime
                           checking verifying that the parameter is actually a value
                           instead of a type.""")
# Shared sentinel returned by @overload stubs; calling it raises at runtime.
_overload_dummy = typing._overload_dummy

if hasattr(typing, "get_overloads"):  # 3.11+
    overload = typing.overload
    get_overloads = typing.get_overloads
    clear_overloads = typing.clear_overloads
else:
    # {module: {qualname: {firstlineno: func}}}
    _overload_registry = collections.defaultdict(
        functools.partial(collections.defaultdict, dict)
    )

    def overload(func):
        """Decorator for overloaded functions/methods.

        In a stub file, place two or more stub definitions for the same
        function in a row, each decorated with @overload.  For example:

            @overload
            def utf8(value: None) -> None: ...
            @overload
            def utf8(value: bytes) -> bytes: ...
            @overload
            def utf8(value: str) -> bytes: ...

        In a non-stub file (i.e. a regular .py file), do the same but
        follow it with an implementation.  The implementation should *not*
        be decorated with @overload.  For example:

            @overload
            def utf8(value: None) -> None: ...
            @overload
            def utf8(value: bytes) -> bytes: ...
            @overload
            def utf8(value: str) -> bytes: ...
            def utf8(value):
                # implementation goes here

        The overloads for a function can be retrieved at runtime using the
        get_overloads() function.
        """
        # classmethod and staticmethod
        f = getattr(func, "__func__", func)
        try:
            # Keyed by firstlineno so successive stubs don't overwrite
            # each other within the same qualname.
            _overload_registry[f.__module__][f.__qualname__][
                f.__code__.co_firstlineno
            ] = func
        except AttributeError:
            # Not a normal function; ignore.
            pass
        return _overload_dummy

    def get_overloads(func):
        """Return all defined overloads for *func* as a sequence."""
        # classmethod and staticmethod
        f = getattr(func, "__func__", func)
        if f.__module__ not in _overload_registry:
            return []
        mod_dict = _overload_registry[f.__module__]
        if f.__qualname__ not in mod_dict:
            return []
        return list(mod_dict[f.__qualname__].values())

    def clear_overloads():
        """Clear all overloads in the registry."""
        _overload_registry.clear()
# This is not a real generic class.  Don't use outside annotations.
Type = typing.Type

# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
# (These names have existed in typing since 3.8, so no backport is needed.)
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
DefaultDict = typing.DefaultDict
OrderedDict = typing.OrderedDict
Counter = typing.Counter
ChainMap = typing.ChainMap
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
# Backport default type arguments for Generator / AsyncGenerator /
# ContextManager / AsyncContextManager (added to typing in 3.13).
if sys.version_info >= (3, 13, 0, "beta"):
    from typing import AsyncContextManager, AsyncGenerator, ContextManager, Generator
else:
    def _is_dunder(attr):
        return attr.startswith('__') and attr.endswith('__')

    # Python <3.9 doesn't have typing._SpecialGenericAlias
    _special_generic_alias_base = getattr(
        typing, "_SpecialGenericAlias", typing._GenericAlias
    )

    class _SpecialGenericAlias(_special_generic_alias_base, _root=True):
        """A _SpecialGenericAlias variant whose trailing type parameters
        may have defaults (the ``defaults`` tuple)."""

        def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()):
            if _special_generic_alias_base is typing._GenericAlias:
                # Python <3.9
                self.__origin__ = origin
                self._nparams = nparams
                super().__init__(origin, nparams, special=True, inst=inst, name=name)
            else:
                # Python >= 3.9
                super().__init__(origin, nparams, inst=inst, name=name)
            self._defaults = defaults

        def __setattr__(self, attr, val):
            allowed_attrs = {'_name', '_inst', '_nparams', '_defaults'}
            if _special_generic_alias_base is typing._GenericAlias:
                # Python <3.9
                allowed_attrs.add("__origin__")
            if _is_dunder(attr) or attr in allowed_attrs:
                object.__setattr__(self, attr, val)
            else:
                # Anything else is forwarded to the origin class, mirroring
                # typing._GenericAlias behaviour.
                setattr(self.__origin__, attr, val)

        @typing._tp_cache
        def __getitem__(self, params):
            if not isinstance(params, tuple):
                params = (params,)
            msg = "Parameters to generic types must be types."
            params = tuple(typing._type_check(p, msg) for p in params)
            # Pad with defaults when the user supplied fewer parameters
            # than nparams but enough remain to be filled from defaults.
            if (
                self._defaults
                and len(params) < self._nparams
                and len(params) + len(self._defaults) >= self._nparams
            ):
                params = (*params, *self._defaults[len(params) - self._nparams:])
            actual_len = len(params)

            if actual_len != self._nparams:
                if self._defaults:
                    expected = f"at least {self._nparams - len(self._defaults)}"
                else:
                    expected = str(self._nparams)
                if not self._nparams:
                    raise TypeError(f"{self} is not a generic class")
                raise TypeError(
                    f"Too {'many' if actual_len > self._nparams else 'few'}"
                    f" arguments for {self};"
                    f" actual {actual_len}, expected {expected}"
                )
            return self.copy_with(params)

    _NoneType = type(None)
    # Generator[YieldType] == Generator[YieldType, None, None], etc.
    Generator = _SpecialGenericAlias(
        collections.abc.Generator, 3, defaults=(_NoneType, _NoneType)
    )
    AsyncGenerator = _SpecialGenericAlias(
        collections.abc.AsyncGenerator, 2, defaults=(_NoneType,)
    )
    ContextManager = _SpecialGenericAlias(
        contextlib.AbstractContextManager,
        2,
        name="ContextManager",
        defaults=(typing.Optional[bool],)
    )
    AsyncContextManager = _SpecialGenericAlias(
        contextlib.AbstractAsyncContextManager,
        2,
        name="AsyncContextManager",
        defaults=(typing.Optional[bool],)
    )
# Non-protocol classes that Protocol subclasses are allowed to inherit from,
# keyed by module name (mirrors typing._PROTO_ALLOWLIST).
_PROTO_ALLOWLIST = {
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
    'typing_extensions': ['Buffer'],
}

# Attributes that are never treated as protocol members.
_EXCLUDED_ATTRS = frozenset(typing.EXCLUDED_ATTRIBUTES) | {
    "__match_args__", "__protocol_attrs__", "__non_callable_proto_members__",
    "__final__",
}
def _get_protocol_attrs(cls):
    """Collect the names that make up *cls*'s protocol interface.

    Walks the MRO (excluding ``object``, ``Protocol`` and ``Generic``),
    gathering both defined attributes and bare annotations, and filtering
    out ABC plumbing (``_abc_*``) and the names in ``_EXCLUDED_ATTRS``.
    """
    attrs = set()
    for base in cls.__mro__[:-1]:  # without object
        if base.__name__ in {'Protocol', 'Generic'}:
            continue
        # Annotated-but-unassigned names count as protocol members too.
        annotations = getattr(base, '__annotations__', {})
        for attr in (*base.__dict__, *annotations):
            if (not attr.startswith('_abc_') and attr not in _EXCLUDED_ATTRS):
                attrs.add(attr)
    return attrs
def _caller(depth=2):
    """Return the ``__name__`` of the module *depth* frames up the stack.

    Returns None on platforms without ``sys._getframe`` (or when the stack
    is shallower than *depth*).
    """
    try:
        return sys._getframe(depth).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):  # For platforms without _getframe()
        return None
# `__match_args__` attribute was removed from protocol members in 3.13,
# we want to backport this change to older Python versions.
if sys.version_info >= (3, 13):
    Protocol = typing.Protocol
else:
    def _allow_reckless_class_checks(depth=3):
        """Allow instance and class checks for special stdlib modules.

        The abc and functools modules indiscriminately call isinstance() and
        issubclass() on the whole MRO of a user class, which may contain protocols.
        """
        return _caller(depth) in {'abc', 'functools', None}

    def _no_init(self, *args, **kwargs):
        # Installed as __init__ on protocol classes so they can't be
        # instantiated directly.
        if type(self)._is_protocol:
            raise TypeError('Protocols cannot be instantiated')

    def _type_check_issubclass_arg_1(arg):
        """Raise TypeError if `arg` is not an instance of `type`
        in `issubclass(arg, <protocol>)`.

        In most cases, this is verified by type.__subclasscheck__.
        Checking it again unnecessarily would slow down issubclass() checks,
        so, we don't perform this check unless we absolutely have to.

        For various error paths, however,
        we want to ensure that *this* error message is shown to the user
        where relevant, rather than a typing.py-specific error message.
        """
        if not isinstance(arg, type):
            # Same error message as for issubclass(1, int).
            raise TypeError('issubclass() arg 1 must be a class')

    # Inheriting from typing._ProtocolMeta isn't actually desirable,
    # but is necessary to allow typing.Protocol and typing_extensions.Protocol
    # to mix without getting TypeErrors about "metaclass conflict"
    class _ProtocolMeta(type(typing.Protocol)):
        # This metaclass is somewhat unfortunate,
        # but is necessary for several reasons...
        #
        # NOTE: DO NOT call super() in any methods in this class
        # That would call the methods on typing._ProtocolMeta on Python 3.8-3.11
        # and those are slow
        def __new__(mcls, name, bases, namespace, **kwargs):
            if name == "Protocol" and len(bases) < 2:
                pass
            elif {Protocol, typing.Protocol} & set(bases):
                # Validate that protocol subclasses only inherit from other
                # protocols (plus a small allowlist of structural ABCs).
                for base in bases:
                    if not (
                        base in {object, typing.Generic, Protocol, typing.Protocol}
                        or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])
                        or is_protocol(base)
                    ):
                        raise TypeError(
                            f"Protocols can only inherit from other protocols, "
                            f"got {base!r}"
                        )
            return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)

        def __init__(cls, *args, **kwargs):
            abc.ABCMeta.__init__(cls, *args, **kwargs)
            if getattr(cls, "_is_protocol", False):
                cls.__protocol_attrs__ = _get_protocol_attrs(cls)

        def __subclasscheck__(cls, other):
            if cls is Protocol:
                return type.__subclasscheck__(cls, other)
            if (
                getattr(cls, '_is_protocol', False)
                and not _allow_reckless_class_checks()
            ):
                if not getattr(cls, '_is_runtime_protocol', False):
                    _type_check_issubclass_arg_1(other)
                    raise TypeError(
                        "Instance and class checks can only be used with "
                        "@runtime_checkable protocols"
                    )
                if (
                    # this attribute is set by @runtime_checkable:
                    cls.__non_callable_proto_members__
                    and cls.__dict__.get("__subclasshook__") is _proto_hook
                ):
                    _type_check_issubclass_arg_1(other)
                    non_method_attrs = sorted(cls.__non_callable_proto_members__)
                    raise TypeError(
                        "Protocols with non-method members don't support issubclass()."
                        f" Non-method members: {str(non_method_attrs)[1:-1]}."
                    )
            return abc.ABCMeta.__subclasscheck__(cls, other)

        def __instancecheck__(cls, instance):
            # We need this method for situations where attributes are
            # assigned in __init__.
            if cls is Protocol:
                return type.__instancecheck__(cls, instance)
            if not getattr(cls, "_is_protocol", False):
                # i.e., it's a concrete subclass of a protocol
                return abc.ABCMeta.__instancecheck__(cls, instance)

            if (
                not getattr(cls, '_is_runtime_protocol', False) and
                not _allow_reckless_class_checks()
            ):
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")

            if abc.ABCMeta.__instancecheck__(cls, instance):
                return True

            # Structural check: every protocol attribute must be present
            # (and non-None for method members).
            for attr in cls.__protocol_attrs__:
                try:
                    val = inspect.getattr_static(instance, attr)
                except AttributeError:
                    break
                # this attribute is set by @runtime_checkable:
                if val is None and attr not in cls.__non_callable_proto_members__:
                    break
            else:
                return True

            return False

        def __eq__(cls, other):
            # Hack so that typing.Generic.__class_getitem__
            # treats typing_extensions.Protocol
            # as equivalent to typing.Protocol
            if abc.ABCMeta.__eq__(cls, other) is True:
                return True
            return cls is Protocol and other is typing.Protocol

        # This has to be defined, or the abc-module cache
        # complains about classes with this metaclass being unhashable,
        # if we define only __eq__!
        def __hash__(cls) -> int:
            return type.__hash__(cls)

    @classmethod
    def _proto_hook(cls, other):
        # __subclasshook__ installed on protocol classes: implements the
        # implicit structural issubclass() check.
        if not cls.__dict__.get('_is_protocol', False):
            return NotImplemented

        for attr in cls.__protocol_attrs__:
            for base in other.__mro__:
                # Check if the members appears in the class dictionary...
                if attr in base.__dict__:
                    if base.__dict__[attr] is None:
                        return NotImplemented
                    break

                # ...or in annotations, if it is a sub-protocol.
                annotations = getattr(base, '__annotations__', {})
                if (
                    isinstance(annotations, collections.abc.Mapping)
                    and attr in annotations
                    and is_protocol(other)
                ):
                    break
            else:
                return NotImplemented
        return True

    class Protocol(typing.Generic, metaclass=_ProtocolMeta):
        __doc__ = typing.Protocol.__doc__
        __slots__ = ()
        _is_protocol = True
        _is_runtime_protocol = False

        def __init_subclass__(cls, *args, **kwargs):
            super().__init_subclass__(*args, **kwargs)

            # Determine if this is a protocol or a concrete subclass.
            if not cls.__dict__.get('_is_protocol', False):
                cls._is_protocol = any(b is Protocol for b in cls.__bases__)

            # Set (or override) the protocol subclass hook.
            if '__subclasshook__' not in cls.__dict__:
                cls.__subclasshook__ = _proto_hook

            # Prohibit instantiation for protocol classes
            if cls._is_protocol and cls.__init__ is Protocol.__init__:
                cls.__init__ = _no_init
if sys.version_info >= (3, 13):
    runtime_checkable = typing.runtime_checkable
else:
    def runtime_checkable(cls):
        """Mark a protocol class as a runtime protocol.

        Such protocol can be used with isinstance() and issubclass().
        Raise TypeError if applied to a non-protocol class.

        This allows a simple-minded structural check very similar to
        one trick ponies in collections.abc such as Iterable.

        For example::

            @runtime_checkable
            class Closable(Protocol):
                def close(self): ...

            assert isinstance(open('/some/file'), Closable)

        Warning: this will check only the presence of the required methods,
        not their type signatures!
        """
        if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False):
            raise TypeError(f'@runtime_checkable can be only applied to protocol classes,'
                            f' got {cls!r}')
        cls._is_runtime_protocol = True

        # typing.Protocol classes on <=3.11 break if we execute this block,
        # because typing.Protocol classes on <=3.11 don't have a
        # `__protocol_attrs__` attribute, and this block relies on the
        # `__protocol_attrs__` attribute. Meanwhile, typing.Protocol classes on 3.12.2+
        # break if we *don't* execute this block, because *they* assume that all
        # protocol classes have a `__non_callable_proto_members__` attribute
        # (which this block sets)
        if isinstance(cls, _ProtocolMeta) or sys.version_info >= (3, 12, 2):
            # PEP 544 prohibits using issubclass()
            # with protocols that have non-method members.
            # See gh-113320 for why we compute this attribute here,
            # rather than in `_ProtocolMeta.__init__`
            cls.__non_callable_proto_members__ = set()
            for attr in cls.__protocol_attrs__:
                try:
                    is_callable = callable(getattr(cls, attr, None))
                except Exception as e:
                    raise TypeError(
                        f"Failed to determine whether protocol member {attr!r} "
                        "is a method member"
                    ) from e
                else:
                    if not is_callable:
                        cls.__non_callable_proto_members__.add(attr)

        return cls

# The "runtime" alias exists for backwards compatibility.
runtime = runtime_checkable
# Our version of runtime-checkable protocols is faster on Python 3.8-3.11
if sys.version_info >= (3, 12):
    SupportsInt = typing.SupportsInt
    SupportsFloat = typing.SupportsFloat
    SupportsComplex = typing.SupportsComplex
    SupportsBytes = typing.SupportsBytes
    SupportsIndex = typing.SupportsIndex
    SupportsAbs = typing.SupportsAbs
    SupportsRound = typing.SupportsRound
else:
    # Re-definitions of the stdlib Supports* protocols, built on this
    # module's faster Protocol/runtime_checkable backports.
    @runtime_checkable
    class SupportsInt(Protocol):
        """An ABC with one abstract method __int__."""
        __slots__ = ()

        @abc.abstractmethod
        def __int__(self) -> int:
            pass

    @runtime_checkable
    class SupportsFloat(Protocol):
        """An ABC with one abstract method __float__."""
        __slots__ = ()

        @abc.abstractmethod
        def __float__(self) -> float:
            pass

    @runtime_checkable
    class SupportsComplex(Protocol):
        """An ABC with one abstract method __complex__."""
        __slots__ = ()

        @abc.abstractmethod
        def __complex__(self) -> complex:
            pass

    @runtime_checkable
    class SupportsBytes(Protocol):
        """An ABC with one abstract method __bytes__."""
        __slots__ = ()

        @abc.abstractmethod
        def __bytes__(self) -> bytes:
            pass

    @runtime_checkable
    class SupportsIndex(Protocol):
        """An ABC with one abstract method __index__."""
        __slots__ = ()

        @abc.abstractmethod
        def __index__(self) -> int:
            pass

    @runtime_checkable
    class SupportsAbs(Protocol[T_co]):
        """
        An ABC with one abstract method __abs__ that is covariant in its return type.
        """
        __slots__ = ()

        @abc.abstractmethod
        def __abs__(self) -> T_co:
            pass

    @runtime_checkable
    class SupportsRound(Protocol[T_co]):
        """
        An ABC with one abstract method __round__ that is covariant in its return type.
        """
        __slots__ = ()

        @abc.abstractmethod
        def __round__(self, ndigits: int = 0) -> T_co:
            pass
def _ensure_subclassable(mro_entries):
def inner(func):
if sys.implementation.name == "pypy" and sys.version_info < (3, 9):
cls_dict = {
"__call__": staticmethod(func),
"__mro_entries__": staticmethod(mro_entries)
}
t = type(func.__name__, (), cls_dict)
return functools.update_wrapper(t(), func)
else:
func.__mro_entries__ = mro_entries
return func
return inner
# Update this to something like >=3.13.0b1 if and when
# PEP 728 is implemented in CPython
_PEP_728_IMPLEMENTED = False

if _PEP_728_IMPLEMENTED:
    # The standard library TypedDict in Python 3.8 does not store runtime information
    # about which (if any) keys are optional.  See https://bugs.python.org/issue38834
    # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
    # keyword with old-style TypedDict().  See https://bugs.python.org/issue42059
    # The standard library TypedDict below Python 3.11 does not store runtime
    # information about optional and required keys when using Required or NotRequired.
    # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
    # Aaaand on 3.12 we add __orig_bases__ to TypedDict
    # to enable better runtime introspection.
    # On 3.13 we deprecate some odd ways of creating TypedDicts.
    # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier.
    # PEP 728 (still pending) makes more changes.
    TypedDict = typing.TypedDict
    _TypedDictMeta = typing._TypedDictMeta
    is_typeddict = typing.is_typeddict
else:
    # 3.10.0 and later
    _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters

    def _get_typeddict_qualifiers(annotation_type):
        # Yield the Required/NotRequired/ReadOnly qualifiers wrapping an
        # annotation, unwrapping Annotated layers along the way.
        while True:
            annotation_origin = get_origin(annotation_type)
            if annotation_origin is Annotated:
                annotation_args = get_args(annotation_type)
                if annotation_args:
                    annotation_type = annotation_args[0]
                else:
                    break
            elif annotation_origin is Required:
                yield Required
                annotation_type, = get_args(annotation_type)
            elif annotation_origin is NotRequired:
                yield NotRequired
                annotation_type, = get_args(annotation_type)
            elif annotation_origin is ReadOnly:
                yield ReadOnly
                annotation_type, = get_args(annotation_type)
            else:
                break

    class _TypedDictMeta(type):
        def __new__(cls, name, bases, ns, *, total=True, closed=False):
            """Create new typed dict class object.

            This method is called when TypedDict is subclassed,
            or when TypedDict is instantiated. This way
            TypedDict supports all three syntax forms described in its docstring.
            Subclasses and instances of TypedDict return actual dictionaries.
            """
            for base in bases:
                if type(base) is not _TypedDictMeta and base is not typing.Generic:
                    raise TypeError('cannot inherit from both a TypedDict type '
                                    'and a non-TypedDict base class')

            if any(issubclass(b, typing.Generic) for b in bases):
                generic_base = (typing.Generic,)
            else:
                generic_base = ()

            # typing.py generally doesn't let you inherit from plain Generic, unless
            # the name of the class happens to be "Protocol"
            tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns)
            tp_dict.__name__ = name
            if tp_dict.__qualname__ == "Protocol":
                tp_dict.__qualname__ = name

            if not hasattr(tp_dict, '__orig_bases__'):
                tp_dict.__orig_bases__ = bases

            annotations = {}
            if "__annotations__" in ns:
                own_annotations = ns["__annotations__"]
            elif "__annotate__" in ns:
                # TODO: Use inspect.VALUE here, and make the annotations lazily evaluated
                own_annotations = ns["__annotate__"](1)
            else:
                own_annotations = {}
            msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
            if _TAKES_MODULE:
                own_annotations = {
                    n: typing._type_check(tp, msg, module=tp_dict.__module__)
                    for n, tp in own_annotations.items()
                }
            else:
                own_annotations = {
                    n: typing._type_check(tp, msg)
                    for n, tp in own_annotations.items()
                }
            required_keys = set()
            optional_keys = set()
            readonly_keys = set()
            mutable_keys = set()
            extra_items_type = None

            # Inherit key classifications from TypedDict bases first;
            # own annotations below may re-classify them.
            for base in bases:
                base_dict = base.__dict__

                annotations.update(base_dict.get('__annotations__', {}))
                required_keys.update(base_dict.get('__required_keys__', ()))
                optional_keys.update(base_dict.get('__optional_keys__', ()))
                readonly_keys.update(base_dict.get('__readonly_keys__', ()))
                mutable_keys.update(base_dict.get('__mutable_keys__', ()))
                base_extra_items_type = base_dict.get('__extra_items__', None)
                if base_extra_items_type is not None:
                    extra_items_type = base_extra_items_type

            if closed and extra_items_type is None:
                extra_items_type = Never
            if closed and "__extra_items__" in own_annotations:
                annotation_type = own_annotations.pop("__extra_items__")
                qualifiers = set(_get_typeddict_qualifiers(annotation_type))
                if Required in qualifiers:
                    raise TypeError(
                        "Special key __extra_items__ does not support "
                        "Required"
                    )
                if NotRequired in qualifiers:
                    raise TypeError(
                        "Special key __extra_items__ does not support "
                        "NotRequired"
                    )
                extra_items_type = annotation_type

            annotations.update(own_annotations)
            for annotation_key, annotation_type in own_annotations.items():
                qualifiers = set(_get_typeddict_qualifiers(annotation_type))

                if Required in qualifiers:
                    required_keys.add(annotation_key)
                elif NotRequired in qualifiers:
                    optional_keys.add(annotation_key)
                elif total:
                    required_keys.add(annotation_key)
                else:
                    optional_keys.add(annotation_key)
                if ReadOnly in qualifiers:
                    mutable_keys.discard(annotation_key)
                    readonly_keys.add(annotation_key)
                else:
                    mutable_keys.add(annotation_key)
                    readonly_keys.discard(annotation_key)

            tp_dict.__annotations__ = annotations
            tp_dict.__required_keys__ = frozenset(required_keys)
            tp_dict.__optional_keys__ = frozenset(optional_keys)
            tp_dict.__readonly_keys__ = frozenset(readonly_keys)
            tp_dict.__mutable_keys__ = frozenset(mutable_keys)
            if not hasattr(tp_dict, '__total__'):
                tp_dict.__total__ = total
            tp_dict.__closed__ = closed
            tp_dict.__extra_items__ = extra_items_type
            return tp_dict

        __call__ = dict  # static method

        def __subclasscheck__(cls, other):
            # Typed dicts are only for static structural subtyping.
            raise TypeError('TypedDict does not support instance and class checks')

        __instancecheck__ = __subclasscheck__

    _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})

    @_ensure_subclassable(lambda bases: (_TypedDict,))
    def TypedDict(typename, fields=_marker, /, *, total=True, closed=False, **kwargs):
        """A simple typed namespace. At runtime it is equivalent to a plain dict.

        TypedDict creates a dictionary type such that a type checker will expect all
        instances to have a certain set of keys, where each key is
        associated with a value of a consistent type. This expectation
        is not checked at runtime.

        Usage::

            class Point2D(TypedDict):
                x: int
                y: int
                label: str

            a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
            b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check

            assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')

        The type info can be accessed via the Point2D.__annotations__ dict, and
        the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
        TypedDict supports an additional equivalent form::

            Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})

        By default, all keys must be present in a TypedDict. It is possible
        to override this by specifying totality::

            class Point2D(TypedDict, total=False):
                x: int
                y: int

        This means that a Point2D TypedDict can have any of the keys omitted. A type
        checker is only expected to support a literal False or True as the value of
        the total argument. True is the default, and makes all items defined in the
        class body be required.

        The Required and NotRequired special forms can also be used to mark
        individual keys as being required or not required::

            class Point2D(TypedDict):
                x: int                 # the "x" key must always be present (Required is the default)
                y: NotRequired[int]    # the "y" key can be omitted

        See PEP 655 for more details on Required and NotRequired.
        """
        if fields is _marker or fields is None:
            if fields is _marker:
                deprecated_thing = "Failing to pass a value for the 'fields' parameter"
            else:
                deprecated_thing = "Passing `None` as the 'fields' parameter"
            example = f"`{typename} = TypedDict({typename!r}, {{}})`"
            deprecation_msg = (
                f"{deprecated_thing} is deprecated and will be disallowed in "
                "Python 3.15. To create a TypedDict class with 0 fields "
                "using the functional syntax, pass an empty dictionary, e.g. "
            ) + example + "."
            warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
            # NOTE(review): a non-bool `closed` here is treated as a field
            # named "closed" supplied via keywords, not as the closed flag.
            if closed is not False and closed is not True:
                kwargs["closed"] = closed
                closed = False
            fields = kwargs
        elif kwargs:
            raise TypeError("TypedDict takes either a dict or keyword arguments,"
                            " but not both")
        if kwargs:
            if sys.version_info >= (3, 13):
                raise TypeError("TypedDict takes no keyword arguments")
            warnings.warn(
                "The kwargs-based syntax for TypedDict definitions is deprecated "
                "in Python 3.11, will be removed in Python 3.13, and may not be "
                "understood by third-party type checkers.",
                DeprecationWarning,
                stacklevel=2,
            )

        ns = {'__annotations__': dict(fields)}
        module = _caller()
        if module is not None:
            # Setting correct module is necessary to make typed dict classes pickleable.
            ns['__module__'] = module

        td = _TypedDictMeta(typename, (), ns, total=total, closed=closed)
        td.__orig_bases__ = (TypedDict,)
        return td

    if hasattr(typing, "_TypedDictMeta"):
        _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
    else:
        _TYPEDDICT_TYPES = (_TypedDictMeta,)

    def is_typeddict(tp):
        """Check if an annotation is a TypedDict class

        For example::

            class Film(TypedDict):
                title: str
                year: int

            is_typeddict(Film)              # => True
            is_typeddict(Union[list, str])  # => False
        """
        # On 3.8, this would otherwise return True
        if hasattr(typing, "TypedDict") and tp is typing.TypedDict:
            return False
        return isinstance(tp, _TYPEDDICT_TYPES)
if hasattr(typing, "assert_type"):
    assert_type = typing.assert_type
else:
    def assert_type(val, typ, /):
        """Assert (to the type checker) that the value is of the given type.

        When the type checker encounters a call to assert_type(), it
        emits an error if the value is not of the specified type::

            def greet(name: str) -> None:
                assert_type(name, str)  # ok
                assert_type(name, int)  # type checker error

        At runtime this returns the first argument unchanged and otherwise
        does nothing.
        """
        # Purely a static-analysis marker: no runtime validation is done.
        return val
if hasattr(typing, "ReadOnly"):  # 3.13+
    get_type_hints = typing.get_type_hints
else:  # <=3.13
    # replaces _strip_annotations()
    def _strip_extras(t):
        """Strips Annotated, Required and NotRequired from a given type."""
        if isinstance(t, _AnnotatedAlias):
            return _strip_extras(t.__origin__)
        if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly):
            return _strip_extras(t.__args__[0])
        # For generic aliases, strip recursively and rebuild only when
        # something actually changed (preserves identity/caching).
        if isinstance(t, typing._GenericAlias):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return t.copy_with(stripped_args)
        if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return _types.GenericAlias(t.__origin__, stripped_args)
        if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return functools.reduce(operator.or_, stripped_args)

        return t

    def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
        """Return type hints for an object.

        This is often the same as obj.__annotations__, but it handles
        forward references encoded as string literals, adds Optional[t] if a
        default value equal to None is set and recursively replaces all
        'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
        (unless 'include_extras=True').

        The argument may be a module, class, method, or function. The annotations
        are returned as a dictionary. For classes, annotations include also
        inherited members.

        TypeError is raised if the argument is not of a type that can contain
        annotations, and an empty dictionary is returned if no annotations are
        present.

        BEWARE -- the behavior of globalns and localns is counterintuitive
        (unless you are familiar with how eval() and exec() work).  The
        search order is locals first, then globals.

        - If no dict arguments are passed, an attempt is made to use the
          globals from obj (or the respective module's globals for classes),
          and these are also used as the locals.  If the object does not appear
          to have globals, an empty dictionary is used.

        - If one dict argument is passed, it is used for both globals and
          locals.

        - If two dict arguments are passed, they specify globals and
          locals, respectively.
        """
        if hasattr(typing, "Annotated"):  # 3.9+
            hint = typing.get_type_hints(
                obj, globalns=globalns, localns=localns, include_extras=True
            )
        else:  # 3.8
            hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
        if include_extras:
            return hint
        return {k: _strip_extras(t) for k, t in hint.items()}
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
    Annotated = typing.Annotated
    # Not exported and not a public API, but needed for get_origin() and get_args()
    # to work.
    _AnnotatedAlias = typing._AnnotatedAlias
# 3.8
else:
    class _AnnotatedAlias(typing._GenericAlias, _root=True):
        """Runtime representation of an annotated type.

        At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
        with extra annotations. The alias behaves like a normal typing alias,
        instantiating is the same as instantiating the underlying type, binding
        it to types is also the same.
        """
        def __init__(self, origin, metadata):
            if isinstance(origin, _AnnotatedAlias):
                # Flatten nested Annotated: merge metadata, keep inner origin.
                metadata = origin.__metadata__ + metadata
                origin = origin.__origin__
            super().__init__(origin, origin)
            self.__metadata__ = metadata

        def copy_with(self, params):
            assert len(params) == 1
            new_type = params[0]
            return _AnnotatedAlias(new_type, self.__metadata__)

        def __repr__(self):
            return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
                    f"{', '.join(repr(a) for a in self.__metadata__)}]")

        def __reduce__(self):
            # Pickle as a re-subscription of Annotated.
            return operator.getitem, (
                Annotated, (self.__origin__, *self.__metadata__)
            )

        def __eq__(self, other):
            if not isinstance(other, _AnnotatedAlias):
                return NotImplemented
            if self.__origin__ != other.__origin__:
                return False
            return self.__metadata__ == other.__metadata__

        def __hash__(self):
            return hash((self.__origin__, self.__metadata__))

    class Annotated:
        """Add context specific metadata to a type.

        Example: Annotated[int, runtime_check.Unsigned] indicates to the
        hypothetical runtime_check module that this type is an unsigned int.
        Every other consumer of this type can ignore this metadata and treat
        this type as int.

        The first argument to Annotated must be a valid type (and will be in
        the __origin__ field), the remaining arguments are kept as a tuple in
        the __extra__ field.

        Details:

        - It's an error to call `Annotated` with less than two arguments.
        - Nested Annotated are flattened::

            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]

        - Instantiating an annotated type is equivalent to instantiating the
          underlying type::

            Annotated[C, Ann1](5) == C(5)

        - Annotated can be used as a generic type alias::

            Optimized = Annotated[T, runtime.Optimize()]
            Optimized[int] == Annotated[int, runtime.Optimize()]

            OptimizedList = Annotated[List[T], runtime.Optimize()]
            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
        """

        __slots__ = ()

        def __new__(cls, *args, **kwargs):
            raise TypeError("Type Annotated cannot be instantiated.")

        @typing._tp_cache
        def __class_getitem__(cls, params):
            if not isinstance(params, tuple) or len(params) < 2:
                raise TypeError("Annotated[...] should be used "
                                "with at least two arguments (a type and an "
                                "annotation).")
            allowed_special_forms = (ClassVar, Final)
            if get_origin(params[0]) in allowed_special_forms:
                origin = params[0]
            else:
                msg = "Annotated[t, ...]: t must be a type."
                origin = typing._type_check(params[0], msg)
            metadata = tuple(params[1:])
            return _AnnotatedAlias(origin, metadata)

        def __init_subclass__(cls, *args, **kwargs):
            raise TypeError(
                f"Cannot subclass {cls.__module__}.Annotated"
            )
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.8-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__, *tp.__metadata__)
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.8
else:
TypeAlias = _ExtensionsSpecialForm(
'TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above."""
)
if hasattr(typing, "NoDefault"):
NoDefault = typing.NoDefault
else:
class NoDefaultTypeMeta(type):
def __setattr__(cls, attr, value):
# TypeError is consistent with the behavior of NoneType
raise TypeError(
f"cannot set {attr!r} attribute of immutable type {cls.__name__!r}"
)
class NoDefaultType(metaclass=NoDefaultTypeMeta):
"""The type of the NoDefault singleton."""
__slots__ = ()
def __new__(cls):
return globals().get("NoDefault") or object.__new__(cls)
def __repr__(self):
return "typing_extensions.NoDefault"
def __reduce__(self):
return "NoDefault"
NoDefault = NoDefaultType()
del NoDefaultType, NoDefaultTypeMeta
def _set_default(type_param, default):
type_param.has_default = lambda: default is not NoDefault
type_param.__default__ = default
def _set_module(typevarlike):
# for pickling:
def_mod = _caller(depth=3)
if def_mod != 'typing_extensions':
typevarlike.__module__ = def_mod
| _ExtensionsSpecialForm |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 1367,
"end": 4226
} | class ____(nn.Module):
def forward(
self,
value: Tensor,
value_spatial_shapes: Tensor,
value_spatial_shapes_list: list[tuple],
level_start_index: Tensor,
sampling_locations: Tensor,
attention_weights: Tensor,
im2col_step: int,
):
batch_size, _, num_heads, hidden_dim = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([height * width for height, width in value_spatial_shapes_list], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for level_id, (height, width) in enumerate(value_spatial_shapes_list):
# batch_size, height*width, num_heads, hidden_dim
# -> batch_size, height*width, num_heads*hidden_dim
# -> batch_size, num_heads*hidden_dim, height*width
# -> batch_size*num_heads, hidden_dim, height, width
value_l_ = (
value_list[level_id]
.flatten(2)
.transpose(1, 2)
.reshape(batch_size * num_heads, hidden_dim, height, width)
)
# batch_size, num_queries, num_heads, num_points, 2
# -> batch_size, num_heads, num_queries, num_points, 2
# -> batch_size*num_heads, num_queries, num_points, 2
sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1)
# batch_size*num_heads, hidden_dim, num_queries, num_points
sampling_value_l_ = nn.functional.grid_sample(
value_l_,
sampling_grid_l_,
mode="bilinear",
padding_mode="zeros",
align_corners=False,
)
sampling_value_list.append(sampling_value_l_)
# (batch_size, num_queries, num_heads, num_levels, num_points)
# -> (batch_size, num_heads, num_queries, num_levels, num_points)
# -> (batch_size, num_heads, 1, num_queries, num_levels*num_points)
attention_weights = attention_weights.transpose(1, 2).reshape(
batch_size * num_heads, 1, num_queries, num_levels * num_points
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(batch_size, num_heads * hidden_dim, num_queries)
)
return output.transpose(1, 2).contiguous()
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the TestDetrDecoder. This class adds two attributes to
BaseModelOutputWithCrossAttentions, namely:
- a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
- a stacked tensor of intermediate reference points.
"""
)
| MultiScaleDeformableAttention |
python | ray-project__ray | python/ray/autoscaler/v2/schema.py | {
"start": 675,
"end": 846
} | class ____:
# Resource name.
resource_name: str = ""
# Total resource.
total: float = 0.0
# Resource used.
used: float = 0.0
@dataclass
| ResourceUsage |
python | pallets__jinja | src/jinja2/runtime.py | {
"start": 33247,
"end": 34148
} | class ____(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = Undefined._fail_with_undefined_error
__eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
__contains__ = Undefined._fail_with_undefined_error
| StrictUndefined |
python | django__django | django/contrib/gis/forms/fields.py | {
"start": 4393,
"end": 4472
} | class ____(GeometryField):
geom_type = "MULTILINESTRING"
| MultiLineStringField |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/react/output_parser.py | {
"start": 2189,
"end": 4566
} | class ____(BaseOutputParser):
"""ReAct Output parser."""
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
"""
Parse output from ReAct agent.
We expect the output to be in one of the following formats:
1. If the agent need to use a tool to answer the question:
```
Thought: <thought>
Action: <action>
Action Input: <action_input>
```
2. If the agent can answer the question without any tools:
```
Thought: <thought>
Answer: <answer>
```
"""
# Use regex to find properly formatted keywords at line boundaries
thought_match = re.search(r"Thought:", output, re.MULTILINE)
action_match = re.search(r"Action:", output, re.MULTILINE)
answer_match = re.search(r"Answer:", output, re.MULTILINE)
thought_idx = thought_match.start() if thought_match else None
action_idx = action_match.start() if action_match else None
answer_idx = answer_match.start() if answer_match else None
if thought_idx is None and action_idx is None and answer_idx is None:
# NOTE: handle the case where the agent directly outputs the answer
# instead of following the thought-answer format
return ResponseReasoningStep(
thought="(Implicit) I can answer without any more tools!",
response=output,
is_streaming=is_streaming,
)
# An "Action" should take priority over an "Answer"
if (
action_idx is not None
and answer_idx is not None
and action_idx < answer_idx
):
return parse_action_reasoning_step(output)
elif action_idx is not None and answer_idx is None:
return parse_action_reasoning_step(output)
if answer_idx is not None:
thought, answer = extract_final_response(output)
return ResponseReasoningStep(
thought=thought, response=answer, is_streaming=is_streaming
)
raise ValueError(f"Could not parse output: {output}")
def format(self, output: str) -> str:
"""Format a query with structured output formatting instructions."""
raise NotImplementedError
| ReActOutputParser |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_artifacts.py | {
"start": 29738,
"end": 31835
} | class ____:
async def test_update_artifact_succeeds(self, artifact, client):
response = await client.post("/artifacts/filter")
current_time = now("UTC")
assert response.status_code == status.HTTP_200_OK
artifact_id = response.json()[0]["id"]
artifact_key = response.json()[0]["key"]
artifact_flow_run_id = response.json()[0]["flow_run_id"]
response = await client.patch(
f"/artifacts/{artifact_id}",
json={"data": {"new": "data"}},
)
assert response.status_code == 204
response = await client.get(f"/artifacts/{artifact_id}")
updated_artifact = parse_obj_as(schemas.core.Artifact, response.json())
assert updated_artifact.data == {"new": "data"}
assert updated_artifact.key == artifact_key
assert str(updated_artifact.flow_run_id) == artifact_flow_run_id
assert updated_artifact.created < current_time
assert updated_artifact.updated > current_time
async def test_update_artifact_does_not_update_if_fields_are_not_set(
self, artifact, client
):
current_time = now("UTC")
artifact_id = artifact["id"]
response = await client.patch(
f"/artifacts/{artifact_id}",
json={},
)
assert response.status_code == 204
response = await client.get(f"/artifacts/{artifact_id}")
updated_artifact = parse_obj_as(schemas.core.Artifact, response.json())
assert updated_artifact.data == artifact["data"]
assert updated_artifact.key == artifact["key"]
assert str(updated_artifact.flow_run_id) == artifact["flow_run_id"]
assert updated_artifact.created < current_time
assert updated_artifact.updated > current_time
async def test_update_artifact_raises_error_if_artifact_not_found(
self, artifacts, client
):
response = await client.patch(
f"/artifacts/{str(uuid4())}",
json={"data": {"new": "data"}},
)
assert response.status_code == 404
| TestUpdateArtifact |
python | walkccc__LeetCode | solutions/53. Maximum Subarray/53-3.py | {
"start": 271,
"end": 1113
} | class ____:
def maxSubArray(self, nums: list[int]) -> int:
def divideAndConquer(l: int, r: int) -> T:
if l == r:
return T(nums[l], nums[l], nums[l], nums[l])
m = (l + r) // 2
left = divideAndConquer(l, m)
right = divideAndConquer(m + 1, r)
maxSubarraySumLeft = max(left.maxSubarraySumLeft,
left.summ + right.maxSubarraySumLeft)
maxSubarraySumRight = max(
left.maxSubarraySumRight + right.summ, right.maxSubarraySumRight)
maxSubarraySum = max(left.maxSubarraySumRight + right.maxSubarraySumLeft,
left.maxSubarraySum, right.maxSubarraySum)
summ = left.summ + right.summ
return T(summ, maxSubarraySumLeft, maxSubarraySumRight, maxSubarraySum)
return divideAndConquer(0, len(nums) - 1).maxSubarraySum
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes1.py | {
"start": 699,
"end": 778
} | class ____(E, other_keyword=2):
pass
args = [1, 2, 3]
kwargs = {"foo": 5}
| I |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/values.py | {
"start": 5286,
"end": 5685
} | class ____(RemoteValueImpl):
"""A RemoteValue that represents a mutable per-worker variable."""
def get(self):
"""Retrieve value with no caching to ensure we get the up-to-date value."""
self._wait_and_maybe_error()
return self._copy_to_local()
@tf_export("distribute.experimental.coordinator.PerWorkerValues",
"distribute.coordinator.PerWorkerValue", v1=[])
| RemoteVariable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.