repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/solarization_test.py | keras/src/layers/preprocessing/image_preprocessing/solarization_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import random
from keras.src import testing
class SolarizationTest(testing.TestCase):
    """Tests for the `Solarization` preprocessing layer."""

    def _test_input_output(self, layer, input_value, expected_value, dtype):
        # Build a constant-valued batch, run it through the layer, and
        # compare against the expected constant (clipped to [0, 255]).
        shape = (2, 224, 224, 3)
        batch = np.full(shape, input_value, dtype=dtype)
        expected = ops.clip(
            np.full(shape, expected_value, dtype=layer.compute_dtype),
            0,
            255,
        )
        self.assertAllClose(layer(batch), expected)

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.Solarization,
            init_kwargs={
                "addition_factor": 0.75,
                "value_range": (20, 200),
                "threshold_factor": (0, 1),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            expected_output_shape=(8, 3, 4, 3),
            supports_masking=False,
        )

    @parameterized.named_parameters(
        ("0_255", 0, 255),
        ("64_191", 64, 191),
        ("127_128", 127, 128),
        ("191_64", 191, 64),
        ("255_0", 255, 0),
    )
    def test_output_values(self, input_value, expected_value):
        # With default factors every pixel is inverted: out = 255 - in.
        self._test_input_output(
            layer=layers.Solarization(value_range=(0, 255)),
            input_value=input_value,
            expected_value=expected_value,
            dtype="uint8",
        )

    @parameterized.named_parameters(
        ("0_0", 0, 0),
        ("191_64", 191, 64),
        ("255_0", 255, 0),
    )
    def test_only_values_above_threshold_are_solarized(
        self, input_value, output_value
    ):
        # Pin the threshold factor to exactly 128/255 so the augmentation
        # is deterministic: only pixels >= 128 get inverted.
        layer = layers.Solarization(
            value_range=(0, 255),
            threshold_factor=(128.0 / 255.0, 128.0 / 255.0),
        )
        self._test_input_output(
            layer=layer,
            input_value=input_value,
            expected_value=output_value,
            dtype="uint8",
        )

    def test_random_augmentation_applied_per_sample(self):
        # Two identical images in one batch must receive different random
        # transformations.
        image = random.uniform((16, 16, 3), minval=0, maxval=255)
        layer = layers.Solarization(
            value_range=(0, 255), threshold_factor=0.5, addition_factor=0.5
        )
        outputs = layer(ops.stack([image, image]))
        self.assertNotAllClose(outputs[0], outputs[1])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_flip_test.py | keras/src/layers/preprocessing/image_preprocessing/random_flip_test.py | import unittest.mock
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src import utils
class MockedRandomFlip(layers.RandomFlip):
    """`RandomFlip` whose random draw is pinned to the constant 0.1.

    Patching `backend.random.uniform` makes every sample flip
    deterministically, so tests can assert exact pixel outputs.
    """

    def call(self, inputs, training=True):
        if len(inputs.shape) == 3:
            batch_size = 1
        else:
            batch_size = self.backend.shape(inputs)[0]
        fixed_draw = self.backend.numpy.full(
            (batch_size, 1, 1, 1), 0.1, dtype="float32"
        )
        patcher = unittest.mock.patch.object(
            self.backend.random, "uniform", return_value=fixed_draw
        )
        with patcher:
            return super().call(inputs, training=training)
class RandomFlipTest(testing.TestCase):
    """Tests for the `RandomFlip` preprocessing layer."""

    @parameterized.named_parameters(
        ("random_flip_horizontal", "horizontal"),
        ("random_flip_vertical", "vertical"),
        ("random_flip_both", "horizontal_and_vertical"),
    )
    def test_random_flip(self, mode):
        # The numpy backend cannot run a training step; skip that check.
        run_training_check = False if backend.backend() == "numpy" else True
        self.run_layer_test(
            layers.RandomFlip,
            init_kwargs={
                "mode": mode,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=run_training_check,
        )

    def test_random_flip_horizontal(self):
        # MockedRandomFlip pins the random draw to 0.1, so every sample is
        # flipped deterministically and exact outputs can be asserted.
        run_training_check = False if backend.backend() == "numpy" else True
        utils.set_random_seed(0)
        # Test 3D input: shape (1*2*3)
        self.run_layer_test(
            MockedRandomFlip,
            init_kwargs={
                "mode": "horizontal",
                "data_format": "channels_last",
                "seed": 42,
            },
            input_data=np.asarray([[[2, 3, 4], [5, 6, 7]]]),
            expected_output=backend.convert_to_tensor([[[5, 6, 7], [2, 3, 4]]]),
            supports_masking=False,
            run_training_check=run_training_check,
        )
        # Test 4D input: shape (2*1*2*3)
        self.run_layer_test(
            MockedRandomFlip,
            init_kwargs={
                "mode": "horizontal",
                "data_format": "channels_last",
                "seed": 42,
            },
            input_data=np.asarray(
                [
                    [[[2, 3, 4], [5, 6, 7]]],
                    [[[2, 3, 4], [5, 6, 7]]],
                ]
            ),
            expected_output=backend.convert_to_tensor(
                [
                    [[[5, 6, 7], [2, 3, 4]]],
                    [[[5, 6, 7], [2, 3, 4]]],
                ]
            ),
            supports_masking=False,
            run_training_check=run_training_check,
        )

    def test_random_flip_vertical(self):
        run_training_check = False if backend.backend() == "numpy" else True
        utils.set_random_seed(0)
        # Test 3D input: shape (2*1*3)
        self.run_layer_test(
            MockedRandomFlip,
            init_kwargs={
                "mode": "vertical",
                "data_format": "channels_last",
                "seed": 42,
            },
            input_data=np.asarray([[[2, 3, 4]], [[5, 6, 7]]]),
            expected_output=backend.convert_to_tensor(
                [[[5, 6, 7]], [[2, 3, 4]]]
            ),
            supports_masking=False,
            run_training_check=run_training_check,
        )
        # Test 4D input: shape (2*2*1*3)
        self.run_layer_test(
            MockedRandomFlip,
            init_kwargs={
                "mode": "vertical",
                "data_format": "channels_last",
                "seed": 42,
            },
            input_data=np.asarray(
                [
                    [
                        [[2, 3, 4]],
                        [[5, 6, 7]],
                    ],
                    [
                        [[2, 3, 4]],
                        [[5, 6, 7]],
                    ],
                ]
            ),
            expected_output=backend.convert_to_tensor(
                [
                    [[[5, 6, 7]], [[2, 3, 4]]],
                    [[[5, 6, 7]], [[2, 3, 4]]],
                ]
            ),
            supports_masking=False,
            run_training_check=run_training_check,
        )

    def test_tf_data_compatibility(self):
        # The layer must work as a `tf.data` map function regardless of
        # the active backend.
        # Test 3D input: shape (2, 1, 3)
        layer = layers.RandomFlip(
            "vertical", data_format="channels_last", seed=42
        )
        input_data = np.array([[[2, 3, 4]], [[5, 6, 7]]])
        expected_output = np.array([[[5, 6, 7]], [[2, 3, 4]]])
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertAllClose(output, expected_output)
        # Test 4D input: shape (2, 2, 1, 3)
        layer = layers.RandomFlip(
            "vertical", data_format="channels_last", seed=42
        )
        input_data = np.array(
            [
                [
                    [[2, 3, 4]],
                    [[5, 6, 7]],
                ],
                [
                    [[2, 3, 4]],
                    [[5, 6, 7]],
                ],
            ]
        )
        expected_output = np.array(
            [
                [[[5, 6, 7]], [[2, 3, 4]]],
                [[[5, 6, 7]], [[2, 3, 4]]],
            ]
        )
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertAllClose(output, expected_output)

    @parameterized.named_parameters(
        (
            "with_horizontal",
            "horizontal",
            [[4, 1, 6, 3], [0, 4, 2, 6]],
        ),
        (
            "with_vertical",
            "vertical",
            [[2, 7, 4, 9], [6, 4, 8, 6]],
        ),
        (
            "with_horizontal_and_vertical",
            "horizontal_and_vertical",
            [[4, 7, 6, 9], [0, 4, 2, 6]],
        ),
    )
    def test_random_flip_bounding_boxes(self, mode, expected_boxes):
        # Boxes are in "xyxy" format; the explicit `transformation` dict
        # forces a flip for the sample instead of drawing one at random.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        random_flip_layer = layers.RandomFlip(
            mode,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "flips": np.asarray([[True]]),
            "input_shape": input_image.shape,
        }
        output = random_flip_layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation=transformation,
            training=True,
        )
        self.assertAllClose(output["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "with_horizontal",
            "horizontal",
            [[4, 1, 6, 3], [0, 4, 2, 6]],
        ),
        (
            "with_vertical",
            "vertical",
            [[2, 7, 4, 9], [6, 4, 8, 6]],
        ),
        (
            "with_horizontal_and_vertical",
            "horizontal_and_vertical",
            [[4, 7, 6, 9], [0, 4, 2, 6]],
        ),
    )
    def test_random_flip_tf_data_bounding_boxes(self, mode, expected_boxes):
        # Same box cases as above, but exercised through a tf.data map to
        # check graph-mode compatibility of transform_bounding_boxes.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        random_flip_layer = layers.RandomFlip(
            mode,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "flips": np.asarray([[True]]),
            "input_shape": input_image.shape,
        }
        ds = ds.map(
            lambda x: random_flip_layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )
        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/solarization.py | keras/src/layers/preprocessing/image_preprocessing/solarization.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.Solarization")
class Solarization(BaseImagePreprocessingLayer):
    """Applies `(max_value - pixel + min_value)` for each pixel in the image.

    When created without `threshold` parameter, the layer performs
    solarization to all values. When created with specified `threshold` the
    layer only augments pixels that are above the `threshold` value.

    **Note:** This layer is safe to use inside a `tf.data` or `grain`
    pipeline (independently of which backend you're using).

    Args:
        addition_factor: (Optional) A tuple of two floats or a single float,
            between 0 and 1. For each augmented image a value is sampled
            from the provided range. If a float is passed, the range is
            interpreted as `(0, addition_factor)`. If specified, this value
            (times the value range of input images, e.g. 255), is added to
            each pixel before solarization and thresholding. Defaults to
            0.0.
        threshold_factor: (Optional) A tuple of two floats or a single
            float. For each augmented image a value is sampled from the
            provided range. If a float is passed, the range is interpreted
            as `(0, threshold_factor)`. If specified, only pixel values
            above this threshold will be solarized.
        value_range: a tuple or a list of two elements. The first value
            represents the lower bound for values in input images, the
            second represents the upper bound. Images passed to the layer
            should have values within `value_range`. Typical values to pass
            are `(0, 255)` (RGB image) or `(0., 1.)` (scaled image).
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.

    Example:

    ```python
    (images, labels), _ = keras.datasets.cifar10.load_data()
    print(images[0, 0, 0])
    # [59 62 63]
    # Note that images are Tensor with values in the range [0, 255]
    solarization = Solarization(value_range=(0, 255))
    images = solarization(images)
    print(images[0, 0, 0])
    # [196, 193, 192]
    ```
    """

    _USE_BASE_FACTOR = False
    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )
    _FACTOR_VALIDATION_ERROR = (
        "The `addition_factor` and `threshold_factor` arguments "
        "should be a number (or a list of two numbers) "
        "in the range [0, 1]. "
    )

    def __init__(
        self,
        addition_factor=0.0,
        threshold_factor=0.0,
        value_range=(0, 255),
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self.addition_factor = self._set_factor(
            addition_factor, "addition_factor"
        )
        self.threshold_factor = self._set_factor(
            threshold_factor, "threshold_factor"
        )
        self._set_value_range(value_range)

    def _set_value_range(self, value_range):
        """Validates and stores `value_range` as a sorted `[low, high]`."""
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def _set_factor(self, factor, factor_name):
        """Normalizes a scalar or 2-element factor into `(lower, upper)`.

        A scalar `f` becomes `(0, f)`; a pair is sorted. Each bound must
        lie in [0, 1].
        """
        if isinstance(factor, (tuple, list)):
            if len(factor) != 2:
                raise ValueError(
                    self._FACTOR_VALIDATION_ERROR
                    + f"Received: {factor_name}={factor}"
                )
            self._check_factor_range(factor[0])
            self._check_factor_range(factor[1])
            lower, upper = sorted(factor)
        elif isinstance(factor, (int, float)):
            self._check_factor_range(factor)
            lower, upper = [0, factor]
        else:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: {factor_name}={factor}"
            )
        return lower, upper

    def _check_factor_range(self, input_number):
        """Raises `ValueError` if a factor bound is outside [0, 1]."""
        if input_number > 1.0 or input_number < 0:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: input_number={input_number}"
            )

    def get_random_transformation(self, data, training=True, seed=None):
        """Samples per-image `additions` and `thresholds` in [0, 255] space.

        Returns `None` outside of training. For batched (rank-4) input the
        sampled factors have shape `(batch, 1, 1, 1)` so each image gets
        its own value; unbatched input shares a single value.
        """
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        if len(images_shape) == 4:
            factor_shape = (images_shape[0], 1, 1, 1)
        else:
            factor_shape = (1, 1, 1)
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # Factors are fractions of the internal [0, 255] working range, so
        # BOTH sampling bounds must be scaled by 255. (Fix: previously only
        # `maxval` was scaled, which skewed the sampled range whenever the
        # lower factor bound was non-zero — e.g. threshold_factor
        # (128/255, 128/255) sampled in [0.5, 128] instead of exactly 128.)
        return {
            "additions": self.backend.random.uniform(
                minval=self.addition_factor[0] * 255,
                maxval=self.addition_factor[1] * 255,
                shape=factor_shape,
                seed=seed,
                dtype=self.compute_dtype,
            ),
            "thresholds": self.backend.random.uniform(
                minval=self.threshold_factor[0] * 255,
                maxval=self.threshold_factor[1] * 255,
                shape=factor_shape,
                seed=seed,
                dtype=self.compute_dtype,
            ),
        }

    def transform_images(self, images, transformation, training=True):
        """Adds noise then inverts pixels at/above the sampled threshold.

        The work happens in a canonical [0, 255] range; results are mapped
        back to `value_range` and saturate-cast to the input dtype.
        """
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            if transformation is None:
                return images
            thresholds = transformation["thresholds"]
            additions = transformation["additions"]
            images = self._transform_value_range(
                images,
                original_range=self.value_range,
                target_range=(0, 255),
                dtype=self.compute_dtype,
            )
            results = images + additions
            results = self.backend.numpy.clip(results, 0, 255)
            # Only pixels at or above the threshold are inverted.
            results = self.backend.numpy.where(
                results < thresholds, results, 255 - results
            )
            results = self._transform_value_range(
                results,
                original_range=(0, 255),
                target_range=self.value_range,
                dtype=self.compute_dtype,
            )
            if results.dtype == images.dtype:
                return results
            if backend.is_int_dtype(images.dtype):
                results = self.backend.numpy.round(results)
            # Clamp during the cast so out-of-range values don't wrap.
            return _saturate_cast(results, images.dtype, self.backend)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by solarization."""
        return labels

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Bounding boxes are unaffected by solarization."""
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by solarization."""
        return segmentation_masks

    def get_config(self):
        base_config = super().get_config()
        config = {
            "value_range": self.value_range,
            "addition_factor": self.addition_factor,
            "threshold_factor": self.threshold_factor,
            "seed": self.seed,
        }
        return {**base_config, **config}

    def compute_output_shape(self, input_shape):
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_posterization.py | keras/src/layers/preprocessing/image_preprocessing/random_posterization.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomPosterization")
class RandomPosterization(BaseImagePreprocessingLayer):
    """Reduces the number of bits for each color channel.

    **Note:** This layer is safe to use inside a `tf.data` or `grain`
    pipeline (independently of which backend you're using).

    References:
        - [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501)
        - [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719)

    Args:
        value_range: a tuple or a list of two elements. The first value
            represents the lower bound for values in passed images, the
            second represents the upper bound. Images passed to the layer
            should have values within `value_range`. Defaults to `(0, 255)`.
        factor: integer, the number of bits to keep for each channel. Must
            be a value between 1-8.
    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (1, 8)
    # Bit width of the working uint8 representation.
    _MAX_FACTOR = 8
    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(
        self,
        factor,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self._set_value_range(value_range)
        self.seed = seed
        self.generator = self.backend.random.SeedGenerator(seed)

    def _set_value_range(self, value_range):
        """Validates and stores `value_range` as a sorted `[low, high]`."""
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Samples per-sample `shift_factor` = `_MAX_FACTOR` - kept bits."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received: "
                f"inputs.shape={images_shape}"
            )
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        if self.factor[0] != self.factor[1]:
            # NOTE(review): backend `randint` presumably excludes `maxval`,
            # so `factor[1]` itself may never be sampled — confirm against
            # the backend implementation.
            factor = self.backend.random.randint(
                (batch_size,),
                minval=self.factor[0],
                maxval=self.factor[1],
                seed=seed,
                dtype="uint8",
            )
        else:
            # Degenerate range: use the constant factor for every sample.
            factor = (
                self.backend.numpy.ones((batch_size,), dtype="uint8")
                * self.factor[0]
            )
        shift_factor = self._MAX_FACTOR - factor
        return {"shift_factor": shift_factor}

    def transform_images(self, images, transformation=None, training=True):
        """Zeroes the `shift_factor` low-order bits of each pixel.

        Pixels are mapped to [0, 255], cast to uint8, bit-shifted right
        then left (clearing the low bits), and mapped back to
        `value_range`. No-op when `training` is False.
        """
        if training:
            shift_factor = transformation["shift_factor"]
            # Broadcast the per-sample shift over the spatial/channel dims.
            shift_factor = self.backend.numpy.reshape(
                shift_factor, self.backend.shape(shift_factor) + (1, 1, 1)
            )
            images = self._transform_value_range(
                images,
                original_range=self.value_range,
                target_range=(0, 255),
                dtype=self.compute_dtype,
            )
            images = self.backend.cast(images, "uint8")
            # Right-then-left shift is the posterization: it truncates the
            # least-significant bits while keeping the value scale.
            images = self.backend.numpy.bitwise_left_shift(
                self.backend.numpy.bitwise_right_shift(images, shift_factor),
                shift_factor,
            )
            images = self.backend.cast(images, self.compute_dtype)
            images = self._transform_value_range(
                images,
                original_range=(0, 255),
                target_range=self.value_range,
                dtype=self.compute_dtype,
            )
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by posterization."""
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by posterization."""
        return segmentation_masks

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Bounding boxes are unaffected by posterization."""
        return bounding_boxes

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "factor": self.factor,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_rotation_test.py | keras/src/layers/preprocessing/image_preprocessing/random_rotation_test.py | import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
    """Tests for the `RandomRotation` preprocessing layer."""

    @parameterized.named_parameters(
        ("random_rotate_neg4", -0.4),
        ("random_rotate_neg2", -0.2),
        ("random_rotate_4", 0.4),
        ("random_rotate_2", 0.2),
        ("random_rotate_tuple", (-0.2, 0.4)),
    )
    def test_random_rotation_shapes(self, factor):
        self.run_layer_test(
            layers.RandomRotation,
            init_kwargs={"factor": factor},
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=False,
        )

    def test_random_rotation_correctness(self):
        # factor=(0.5, 0.5) rotates by exactly 180 degrees, which reverses
        # the flattened pixel order of a 5x5 image.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.arange(25).reshape(input_shape)
        layer = layers.RandomRotation(factor=(0.5, 0.5))
        expected_output = np.arange(24, -1, -1).reshape(input_shape)
        self.assertAllClose(
            backend.convert_to_tensor(expected_output),
            layer(input_image),
            atol=1e-5,
        )

    def test_training_false(self):
        # With training=False the layer must be a no-op.
        input_image = np.arange(25).reshape((1, 5, 5, 1))
        layer = layers.RandomRotation(factor=(0.5, 0.5))
        self.assertAllClose(layer(input_image, training=False), input_image)

    def test_tf_data_compatibility(self):
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.arange(25).reshape(input_shape)
        layer = layers.RandomRotation(factor=(0.5, 0.5))
        ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
        # Dataset slicing drops the batch dimension, hence input_shape[1:].
        expected_output = np.arange(24, -1, -1).reshape(input_shape[1:])
        output = next(iter(ds)).numpy()
        self.assertAllClose(expected_output, output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/equalization.py | keras/src/layers/preprocessing/image_preprocessing/equalization.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.Equalization")
class Equalization(BaseImagePreprocessingLayer):
    """Preprocessing layer for histogram equalization on image channels.

    Histogram equalization is a technique to adjust image intensities to
    enhance contrast by effectively spreading out the most frequent
    intensity values. This layer applies equalization on a channel-wise
    basis, which can improve the visibility of details in images.

    This layer works with both grayscale and color images, performing
    equalization independently on each color channel. At inference time,
    the equalization is consistently applied.

    **Note:** This layer is safe to use inside a `tf.data` or `grain`
    pipeline (independently of which backend you're using).

    Args:
        value_range: Optional list/tuple of 2 floats specifying the lower
            and upper limits of the input data values. Defaults to
            `[0, 255]`. If the input image has been scaled, use the
            appropriate range (e.g., `[0.0, 1.0]`). The equalization will
            be scaled to this range, and output values will be clipped
            accordingly.
        bins: Integer specifying the number of histogram bins to use for
            equalization. Defaults to 256, which is suitable for 8-bit
            images. Larger values can provide more granular intensity
            redistribution.

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`,
        or `(..., channels, target_height, target_width)`,
        in `"channels_first"` format.

    Example:

    ```python
    # Create an equalization layer for standard 8-bit images
    equalizer = keras.layers.Equalization()
    # An image with uneven intensity distribution
    image = [...]  # your input image
    # Apply histogram equalization
    equalized_image = equalizer(image)
    # For images with custom value range
    custom_equalizer = keras.layers.Equalization(
        value_range=[0.0, 1.0],  # for normalized images
        bins=128  # fewer bins for more subtle equalization
    )
    custom_equalized = custom_equalizer(normalized_image)
    ```
    """

    def __init__(
        self, value_range=(0, 255), bins=256, data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.bins = bins
        self._set_value_range(value_range)
        self.data_format = backend.standardize_data_format(data_format)

    def _set_value_range(self, value_range):
        """Validates and stores `value_range` as a sorted `[low, high]`."""
        # NOTE(review): `_VALUE_RANGE_VALIDATION_ERROR` is not defined on
        # this class — presumably inherited from the base preprocessing
        # layer; confirm it exists there.
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def _custom_histogram_fixed_width(self, values, value_range, nbins):
        """Counts `values` into `nbins` equal-width bins over `value_range`.

        Returns an int32 histogram of length `nbins`. JAX gets a
        bincount-free path because `bincount`'s output shape is not
        jittable there.
        """
        values = self.backend.cast(values, "float32")
        value_min, value_max = value_range
        value_min = self.backend.cast(value_min, "float32")
        value_max = self.backend.cast(value_max, "float32")
        # Map each value to a bin index in [0, nbins - 1].
        scaled = (values - value_min) * (nbins - 1) / (value_max - value_min)
        indices = self.backend.cast(scaled, "int32")
        indices = self.backend.numpy.clip(indices, 0, nbins - 1)
        flat_indices = self.backend.numpy.reshape(indices, [-1])
        if backend.backend() == "jax":
            # for JAX bincount is never jittable because of output shape
            histogram = self.backend.numpy.zeros(nbins, dtype="int32")
            for i in range(nbins):
                matches = self.backend.cast(
                    self.backend.numpy.equal(flat_indices, i), "int32"
                )
                bin_count = self.backend.numpy.sum(matches)
                one_hot = self.backend.cast(
                    self.backend.numpy.arange(nbins) == i, "int32"
                )
                histogram = histogram + (bin_count * one_hot)
            return histogram
        else:
            # TensorFlow/PyTorch/NumPy implementation using bincount
            return self.backend.numpy.bincount(
                flat_indices,
                minlength=nbins,
            )

    def _scale_values(self, values, source_range, target_range):
        """Linearly remaps `values` from `source_range` to `target_range`."""
        source_min, source_max = source_range
        target_min, target_max = target_range
        scale = (target_max - target_min) / (source_max - source_min)
        offset = target_min - source_min * scale
        return values * scale + offset

    def _equalize_channel(self, channel, value_range):
        """Equalizes one channel, working internally in the [0, 255] range."""
        if value_range != (0, 255):
            channel = self._scale_values(channel, value_range, (0, 255))
        hist = self._custom_histogram_fixed_width(
            channel, value_range=(0, 255), nbins=self.bins
        )
        nonzero_bins = self.backend.numpy.count_nonzero(hist)
        # A (near-)constant channel occupies at most one bin; leave it
        # unchanged rather than dividing by a degenerate CDF.
        equalized = self.backend.numpy.where(
            nonzero_bins <= 1, channel, self._apply_equalization(channel, hist)
        )
        if value_range != (0, 255):
            equalized = self._scale_values(equalized, (0, 255), value_range)
        return equalized

    def _apply_equalization(self, channel, hist):
        """Builds the CDF-based lookup table and maps pixels through it."""
        cdf = self.backend.numpy.cumsum(hist)
        # Find the first non-zero CDF entry (JAX cannot use data-dependent
        # indexing via nonzero, so it uses argmax on a boolean mask).
        if self.backend.name == "jax":
            mask = cdf > 0
            first_nonzero_idx = self.backend.numpy.argmax(mask)
            cdf_min = self.backend.numpy.take(cdf, first_nonzero_idx)
        else:
            cdf_min = self.backend.numpy.take(
                cdf, self.backend.numpy.nonzero(cdf)[0][0]
            )
        denominator = cdf[-1] - cdf_min
        # Guard against division by zero for degenerate histograms.
        denominator = self.backend.numpy.where(
            denominator == 0,
            self.backend.numpy.ones_like(1, dtype=denominator.dtype),
            denominator,
        )
        lookup_table = ((cdf - cdf_min) * 255) / denominator
        lookup_table = self.backend.numpy.clip(
            self.backend.numpy.round(lookup_table), 0, 255
        )
        # Map each pixel to its bin and read the equalized value.
        scaled_channel = (channel / 255.0) * (self.bins - 1)
        indices = self.backend.cast(
            self.backend.numpy.clip(scaled_channel, 0, self.bins - 1), "int32"
        )
        return self.backend.numpy.take(lookup_table, indices)

    def transform_images(self, images, transformation, training=True):
        """Equalizes each channel independently; no-op when not training."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            if self.data_format == "channels_first":
                channels = []
                for i in range(self.backend.core.shape(images)[-3]):
                    channel = images[..., i, :, :]
                    equalized = self._equalize_channel(
                        channel, self.value_range
                    )
                    channels.append(equalized)
                equalized_images = self.backend.numpy.stack(channels, axis=-3)
            else:
                channels = []
                for i in range(self.backend.core.shape(images)[-1]):
                    channel = images[..., i]
                    equalized = self._equalize_channel(
                        channel, self.value_range
                    )
                    channels.append(equalized)
                equalized_images = self.backend.numpy.stack(channels, axis=-1)
            return self.backend.cast(equalized_images, self.compute_dtype)
        return images

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_output_spec(self, inputs, **kwargs):
        return inputs

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Bounding boxes are unaffected by equalization."""
        return bounding_boxes

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by equalization."""
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by equalization."""
        return segmentation_masks

    def get_config(self):
        config = super().get_config()
        config.update({"bins": self.bins, "value_range": self.value_range})
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/cut_mix.py | keras/src/layers/preprocessing/image_preprocessing/cut_mix.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.CutMix")
class CutMix(BaseImagePreprocessingLayer):
"""CutMix data augmentation technique.
CutMix is a data augmentation method where patches are cut and pasted
between two images in the dataset, while the labels are also mixed
proportionally to the area of the patches.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
References:
- [CutMix paper]( https://arxiv.org/abs/1905.04899).
Args:
factor: A single float or a tuple of two floats between 0 and 1.
If a tuple of numbers is passed, a `factor` is sampled
between the two values.
If a single float is passed, a value between 0 and the passed
float is sampled. These values define the range from which the
mixing weight is sampled. A higher factor increases the variability
in patch sizes, leading to more diverse and larger mixed patches.
Defaults to 1.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
def __init__(self, factor=1.0, seed=None, data_format=None, **kwargs):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.seed = seed
self.generator = SeedGenerator(seed)
if self.data_format == "channels_first":
self.height_axis = -2
self.width_axis = -1
self.channel_axis = -3
else:
self.height_axis = -3
self.width_axis = -2
self.channel_axis = -1
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
if len(images_shape) == 3:
return None
batch_size = images_shape[0]
image_height = images_shape[self.height_axis]
image_width = images_shape[self.width_axis]
seed = seed or self._get_seed_generator(self.backend._backend)
mix_weight = self._generate_mix_weight(batch_size, seed)
ratio = self.backend.numpy.sqrt(1.0 - mix_weight)
x0, x1 = self._compute_crop_bounds(batch_size, image_width, ratio, seed)
y0, y1 = self._compute_crop_bounds(
batch_size, image_height, ratio, seed
)
batch_masks, mix_weight = self._generate_batch_mask(
images_shape,
(x0, x1, y0, y1),
)
permutation_order = self.backend.random.shuffle(
self.backend.numpy.arange(0, batch_size, dtype="int32"),
seed=seed,
)
return {
"permutation_order": permutation_order,
"batch_masks": batch_masks,
"mix_weight": mix_weight,
}
def _generate_batch_mask(self, images_shape, box_corners):
    """Build per-image boolean patch masks and the realized mix weight.

    Args:
        images_shape: shape of the batched image tensor.
        box_corners: tuple `(x0, x1, y0, y1)` of per-image patch bounds,
            each of shape `(batch,)`.

    Returns:
        Tuple `(batch_masks, mix_weight)`: `batch_masks` is a boolean
        tensor in the image layout (True inside the patch, repeated over
        channels); `mix_weight` is the area fraction of each image left
        untouched by the patch.
    """

    def _generate_grid_xy(image_height, image_width):
        # Pixel-coordinate grids with singleton batch/channel axes
        # inserted to match the configured data format.
        grid_y, grid_x = self.backend.numpy.meshgrid(
            self.backend.numpy.arange(
                image_height, dtype=self.compute_dtype
            ),
            self.backend.numpy.arange(
                image_width, dtype=self.compute_dtype
            ),
            indexing="ij",
        )
        if self.data_format == "channels_last":
            grid_y = self.backend.cast(
                grid_y[None, :, :, None], dtype=self.compute_dtype
            )
            grid_x = self.backend.cast(
                grid_x[None, :, :, None], dtype=self.compute_dtype
            )
        else:
            grid_y = self.backend.cast(
                grid_y[None, None, :, :], dtype=self.compute_dtype
            )
            grid_x = self.backend.cast(
                grid_x[None, None, :, :], dtype=self.compute_dtype
            )
        return grid_x, grid_y

    image_height, image_width = (
        images_shape[self.height_axis],
        images_shape[self.width_axis],
    )
    grid_x, grid_y = _generate_grid_xy(image_height, image_width)
    x0, x1, y0, y1 = box_corners
    # Expand bounds to (batch, 1, 1, 1) so they broadcast against the
    # coordinate grids.
    x0 = x0[:, None, None, None]
    y0 = y0[:, None, None, None]
    x1 = x1[:, None, None, None]
    y1 = y1[:, None, None, None]
    batch_masks = (
        (grid_x >= x0) & (grid_x < x1) & (grid_y >= y0) & (grid_y < y1)
    )
    # Replicate the single-channel mask across all channels.
    batch_masks = self.backend.numpy.repeat(
        batch_masks, images_shape[self.channel_axis], axis=self.channel_axis
    )
    # Realized lambda: fraction of pixels outside the cut patch.
    mix_weight = 1.0 - (x1 - x0) * (y1 - y0) / (image_width * image_height)
    return batch_masks, mix_weight
def _compute_crop_bounds(self, batch_size, image_length, crop_ratio, seed):
    """Sample per-image start/end coordinates of the cut patch along one
    spatial axis.

    The patch length is `crop_ratio * image_length`; its start position is
    drawn uniformly so the patch stays inside the image.
    """
    ops = self.backend
    patch_length = ops.cast(
        crop_ratio * image_length, dtype=self.compute_dtype
    )
    offset_fraction = ops.random.uniform(
        shape=[batch_size],
        minval=0,
        maxval=1,
        dtype=self.compute_dtype,
        seed=seed,
    )
    lower = offset_fraction * (image_length - patch_length)
    upper = lower + patch_length
    return lower, upper
def _generate_mix_weight(self, batch_size, seed):
    """Draw one mixing weight per image from `Beta(alpha, alpha)`, where
    `alpha` is itself sampled uniformly from the configured factor range."""
    ops = self.backend
    # The small epsilon keeps the Beta concentration strictly positive
    # when the sampled factor is 0.
    concentration = (
        ops.random.uniform(
            shape=(),
            minval=self.factor[0],
            maxval=self.factor[1],
            dtype=self.compute_dtype,
            seed=seed,
        )
        + 1e-6
    )
    return ops.random.beta(
        (batch_size,),
        concentration,
        concentration,
        seed=seed,
        dtype=self.compute_dtype,
    )
def transform_images(self, images, transformation=None, training=True):
    """Replace each image's sampled patch with pixels from its randomly
    paired donor image; a pass-through outside of training."""
    if not training or transformation is None:
        return images
    ops = self.backend
    images = ops.cast(images, self.compute_dtype)
    order = transformation["permutation_order"]
    masks = transformation["batch_masks"]
    # Inside the patch, read from the permuted (donor) batch instead.
    donors = ops.numpy.take(images, order, axis=0)
    mixed = ops.numpy.where(masks, donors, images)
    return ops.cast(mixed, self.compute_dtype)
def transform_labels(self, labels, transformation, training=True):
    """Blend each label vector with its donor's labels using the realized
    area-based mixing weight; a pass-through outside of training."""
    if not training or transformation is None:
        return labels
    ops = self.backend
    order = transformation["permutation_order"]
    weight = ops.numpy.reshape(transformation["mix_weight"], [-1, 1])
    donor_labels = ops.numpy.take(labels, order, axis=0)
    return weight * labels + (1.0 - weight) * donor_labels
def transform_bounding_boxes(
    self,
    bounding_boxes,
    transformation,
    training=True,
):
    """Bounding boxes are not supported by CutMix.

    Swapping rectangular patches between images has no well-defined
    effect on box annotations, so this raises with an explanatory
    message instead of a bare `NotImplementedError()`.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError(
        "CutMix does not support bounding boxes. Remove the "
        "`bounding_boxes` entry from the input data, or use an "
        "augmentation layer that supports bounding boxes."
    )
def transform_segmentation_masks(
    self, segmentation_masks, transformation, training=True
):
    """Apply the same patch swap used for images to segmentation masks."""
    return self.transform_images(
        segmentation_masks,
        transformation=transformation,
        training=training,
    )
def compute_output_shape(self, input_shape):
    """CutMix swaps pixels in place, so the output shape equals the
    input shape."""
    return input_shape
def get_config(self):
    """Return the serializable configuration of the layer."""
    config = super().get_config()
    config.update(
        {
            "factor": self.factor,
            "seed": self.seed,
        }
    )
    return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/resizing.py | keras/src/layers/preprocessing/image_preprocessing/resizing.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.Resizing")
class Resizing(BaseImagePreprocessingLayer):
    """A preprocessing layer which resizes images.

    This layer resizes an image input to a target height and width. The input
    should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"`
    format. Input pixel values can be of any range
    (e.g. `[0., 1.)` or `[0, 255]`).

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`,
        in `"channels_last"` format,
        or `(..., channels, target_height, target_width)`,
        in `"channels_first"` format.

    Args:
        height: Integer, the height of the output shape.
        width: Integer, the width of the output shape.
        interpolation: String, the interpolation method.
            Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
            `"lanczos3"`, `"lanczos5"`. Defaults to `"bilinear"`.
        crop_to_aspect_ratio: If `True`, resize the images without aspect
            ratio distortion. When the original aspect ratio differs
            from the target aspect ratio, the output image will be
            cropped so as to return the
            largest possible window in the image (of size `(height, width)`)
            that matches the target aspect ratio. By default
            (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
        pad_to_aspect_ratio: If `True`, pad the images without aspect
            ratio distortion. When the original aspect ratio differs
            from the target aspect ratio, the output image will be
            evenly padded on the short side.
        fill_mode: When using `pad_to_aspect_ratio=True`, padded areas
            are filled according to the given mode. Only `"constant"` is
            supported at this time
            (fill with constant value, equal to `fill_value`).
        fill_value: Float. Padding value to use when `pad_to_aspect_ratio=True`.
        antialias: Boolean. Whether to apply an antialiasing filter when
            resizing; forwarded to the backend resize op. Defaults to
            `False`.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.
    """

    # Resizing is deterministic; the base class's random factor machinery
    # is not used.
    _USE_BASE_FACTOR = False

    def __init__(
        self,
        height,
        width,
        interpolation="bilinear",
        crop_to_aspect_ratio=False,
        pad_to_aspect_ratio=False,
        fill_mode="constant",
        fill_value=0.0,
        antialias=False,
        data_format=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.height = height
        self.width = width
        self.interpolation = interpolation
        self.data_format = backend.standardize_data_format(data_format)
        self.crop_to_aspect_ratio = crop_to_aspect_ratio
        self.pad_to_aspect_ratio = pad_to_aspect_ratio
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.antialias = bool(antialias)
        # Negative axis indices work for both 3D and 4D inputs.
        if self.data_format == "channels_first":
            self.height_axis = -2
            self.width_axis = -1
        elif self.data_format == "channels_last":
            self.height_axis = -3
            self.width_axis = -2

    def transform_images(self, images, transformation=None, training=True):
        """Resize `images` to `(height, width)`, preserving input dtype.

        Applied in both training and inference (resizing is not an
        augmentation).
        """
        size = (self.height, self.width)
        resized = self.backend.image.resize(
            images,
            size=size,
            interpolation=self.interpolation,
            antialias=self.antialias,
            data_format=self.data_format,
            crop_to_aspect_ratio=self.crop_to_aspect_ratio,
            pad_to_aspect_ratio=self.pad_to_aspect_ratio,
            fill_mode=self.fill_mode,
            fill_value=self.fill_value,
        )
        if resized.dtype == images.dtype:
            # Fast path: the backend already returned the input dtype.
            return resized
        if backend.is_int_dtype(images.dtype):
            # Round before casting back so integer outputs are not
            # truncated toward zero.
            resized = self.backend.numpy.round(resized)
        return _saturate_cast(resized, images.dtype, self.backend)

    def transform_segmentation_masks(
        self, segmentation_masks, transformation=None, training=True
    ):
        """Resize segmentation masks with the same settings as images."""
        return self.transform_images(segmentation_masks)

    def transform_labels(self, labels, transformation=None, training=True):
        """Resizing does not affect labels."""
        return labels

    def get_random_transformation(self, data, training=True, seed=None):
        """Return the input spatial size `(input_height, input_width)`.

        Resizing is deterministic, so the "transformation" is just the
        original image size, which `transform_bounding_boxes` needs to
        rescale box coordinates.
        """
        if isinstance(data, dict):
            input_shape = self.backend.shape(data["images"])
        else:
            input_shape = self.backend.shape(data)
        input_height, input_width = (
            input_shape[self.height_axis],
            input_shape[self.width_axis],
        )
        return input_height, input_width

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Rescale bounding boxes into the resized image's coordinates."""
        ops = self.backend
        input_height, input_width = transformation
        # Boxes that are all -1 or all 0 act as "no box" sentinels;
        # remember them so they can be zeroed out after the transform.
        mask_negative_1s = ops.numpy.all(bounding_boxes["boxes"] == -1, axis=-1)
        mask_zeros = ops.numpy.all(bounding_boxes["boxes"] == 0, axis=-1)
        boxes_mask = ops.numpy.logical_or(mask_negative_1s, mask_zeros)

        # Work in "xyxy" pixel coordinates for the geometric transform.
        bounding_boxes = convert_format(
            bounding_boxes,
            source=self.bounding_box_format,
            target="xyxy",
            height=input_height,
            width=input_width,
        )

        bounding_boxes["boxes"] = self._transform_xyxy(
            bounding_boxes["boxes"],
            input_height=input_height,
            input_width=input_width,
        )

        bounding_boxes = clip_to_image_size(
            bounding_boxes=bounding_boxes,
            height=self.height,
            width=self.width,
        )

        # Restore sentinel boxes to all zeros.
        bounding_boxes["boxes"] = ops.numpy.where(
            ops.numpy.expand_dims(boxes_mask, axis=-1),
            ops.convert_to_tensor(
                [0.0, 0.0, 0.0, 0.0], dtype=bounding_boxes["boxes"].dtype
            ),
            bounding_boxes["boxes"],
        )

        bounding_boxes = convert_format(
            bounding_boxes,
            source="xyxy",
            target=self.bounding_box_format,
            height=self.height,
            width=self.width,
        )

        return bounding_boxes

    def _transform_xyxy(self, boxes, input_height, input_width):
        """Dispatch box rescaling to the active resize strategy."""
        ops = self.backend
        input_height = ops.cast(input_height, dtype=boxes.dtype)
        input_width = ops.cast(input_width, dtype=boxes.dtype)
        if self.pad_to_aspect_ratio:
            return self._transform_boxes_pad_to_aspect_ratio(
                boxes, input_height, input_width
            )
        elif self.crop_to_aspect_ratio:
            return self._transform_boxes_crop_to_aspect_ratio(
                boxes, input_height, input_width
            )
        else:
            return self._transform_boxes_stretch(
                boxes, input_height, input_width
            )

    def _transform_boxes_pad_to_aspect_ratio(
        self, boxes, input_height, input_width
    ):
        """Transforms bounding boxes for padding to aspect ratio."""
        ops = self.backend
        height_ratio = ops.cast(self.height / input_height, dtype=boxes.dtype)
        width_ratio = ops.cast(self.width / input_width, dtype=boxes.dtype)
        # The image is scaled uniformly by the smaller ratio and centered,
        # so boxes are scaled the same way and shifted by the pad offsets.
        min_aspect_ratio = ops.numpy.minimum(height_ratio, width_ratio)
        y_offset = (self.height - input_height * min_aspect_ratio) // 2
        x_offset = (self.width - input_width * min_aspect_ratio) // 2
        return ops.numpy.stack(
            [
                boxes[..., 0] * min_aspect_ratio + x_offset,
                boxes[..., 1] * min_aspect_ratio + y_offset,
                boxes[..., 2] * min_aspect_ratio + x_offset,
                boxes[..., 3] * min_aspect_ratio + y_offset,
            ],
            axis=-1,
        )

    def _transform_boxes_crop_to_aspect_ratio(
        self, boxes, input_height, input_width
    ):
        """Transforms bounding boxes for cropping to aspect ratio."""
        ops = self.backend
        source_aspect_ratio = input_width / input_height
        target_aspect_ratio = self.width / self.height
        # The image is scaled so the target window fits inside it, then
        # the centered excess on the long side is cropped away.
        new_width = ops.numpy.where(
            source_aspect_ratio > target_aspect_ratio,
            self.height * source_aspect_ratio,
            self.width,
        )
        new_height = ops.numpy.where(
            source_aspect_ratio > target_aspect_ratio,
            self.height,
            self.width / source_aspect_ratio,
        )
        scale_x = new_width / input_width
        scale_y = new_height / input_height
        crop_left = (new_width - self.width) // 2
        crop_top = (new_height - self.height) // 2
        return ops.numpy.stack(
            [
                boxes[..., 0] * scale_x - crop_left,
                boxes[..., 1] * scale_y - crop_top,
                boxes[..., 2] * scale_x - crop_left,
                boxes[..., 3] * scale_y - crop_top,
            ],
            axis=-1,
        )

    def _transform_boxes_stretch(self, boxes, input_height, input_width):
        """Transforms bounding boxes by simple stretching."""
        ops = self.backend
        height_ratio = ops.cast(self.height / input_height, dtype=boxes.dtype)
        width_ratio = ops.cast(self.width / input_width, dtype=boxes.dtype)
        return ops.numpy.stack(
            [
                boxes[..., 0] * width_ratio,
                boxes[..., 1] * height_ratio,
                boxes[..., 2] * width_ratio,
                boxes[..., 3] * height_ratio,
            ],
            axis=-1,
        )

    def compute_output_shape(self, input_shape):
        """Replace the spatial dims of `input_shape` with the target size."""
        input_shape = list(input_shape)
        if len(input_shape) == 4:
            if self.data_format == "channels_last":
                input_shape[1] = self.height
                input_shape[2] = self.width
            else:
                input_shape[2] = self.height
                input_shape[3] = self.width
        else:
            # 3D (unbatched) input.
            if self.data_format == "channels_last":
                input_shape[0] = self.height
                input_shape[1] = self.width
            else:
                input_shape[1] = self.height
                input_shape[2] = self.width
        return tuple(input_shape)

    def get_config(self):
        """Return the serializable configuration of the layer."""
        base_config = super().get_config()
        config = {
            "height": self.height,
            "width": self.width,
            "interpolation": self.interpolation,
            "crop_to_aspect_ratio": self.crop_to_aspect_ratio,
            "pad_to_aspect_ratio": self.pad_to_aspect_ratio,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "antialias": self.antialias,
            "data_format": self.data_format,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_translation_test.py | keras/src/layers/preprocessing/image_preprocessing/random_translation_test.py | import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.utils import backend_utils
class RandomTranslationTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomTranslation`."""

    @parameterized.named_parameters(
        ("random_translate_4_by_6", 0.4, 0.6),
        ("random_translate_3_by_2", 0.3, 0.2),
        ("random_translate_tuple_factor", (-0.5, 0.4), (0.2, 0.3)),
    )
    def test_random_translation(self, height_factor, width_factor):
        # Smoke test: the layer builds and preserves shape for valid
        # factors.
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": height_factor,
                "width_factor": width_factor,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.named_parameters(
        ("bad_len", [0.1, 0.2, 0.3], 0.0),
        ("bad_type", {"dummy": 0.3}, 0.0),
        ("exceed_range_single", -1.1, 0.0),
        ("exceed_range_tuple", (-1.1, 0.0), 0.0),
    )
    def test_random_translation_with_bad_factor(
        self, height_factor, width_factor
    ):
        # Malformed or out-of-range factors must be rejected at
        # construction time.
        with self.assertRaises(ValueError):
            self.run_layer_test(
                layers.RandomTranslation,
                init_kwargs={
                    "height_factor": height_factor,
                    "width_factor": width_factor,
                },
                input_shape=(2, 3, 4),
                expected_output_shape=(2, 3, 4),
                supports_masking=False,
                run_training_check=False,
            )

    def test_random_translation_with_inference_mode(self):
        # With training=False the layer is an identity function.
        input_data = np.random.random((1, 4, 4, 3))
        expected_output = input_data
        layer = layers.RandomTranslation(0.2, 0.1)
        output = layer(input_data, training=False)
        self.assertAllClose(output, expected_output)

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_up_numeric_reflect(self, data_format):
        input_image = np.arange(0, 25)
        # Shifting by -.2 * 5 = 1 pixel (up), reflect fill at the bottom.
        expected_output = np.asarray(
            [
                [5, 6, 7, 8, 9],
                [10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19],
                [20, 21, 22, 23, 24],
                [20, 21, 22, 23, 24],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": (-0.2, -0.2),
                "width_factor": 0.0,
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_up_numeric_constant(self, data_format):
        input_image = np.arange(0, 25).astype("float32")
        # Shifting by -.2 * 5 = 1 pixel.
        expected_output = np.asarray(
            [
                [5, 6, 7, 8, 9],
                [10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19],
                [20, 21, 22, 23, 24],
                [0, 0, 0, 0, 0],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1)), dtype="float32"
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5)), dtype="float32"
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": (-0.2, -0.2),
                "width_factor": 0.0,
                "fill_mode": "constant",
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_down_numeric_reflect(self, data_format):
        input_image = np.arange(0, 25)
        # Shifting by .2 * 5 = 1 pixel.
        expected_output = np.asarray(
            [
                [0, 1, 2, 3, 4],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": (0.2, 0.2),
                "width_factor": 0.0,
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_asymmetric_size_numeric_reflect(
        self, data_format
    ):
        # Non-square (8x2) input; shift of half the height with reflect
        # fill.
        input_image = np.arange(0, 16)
        expected_output = np.asarray(
            [
                [6, 7],
                [4, 5],
                [2, 3],
                [0, 1],
                [0, 1],
                [2, 3],
                [4, 5],
                [6, 7],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 8, 2, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 8, 2, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 8, 2))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 8, 2))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": (0.5, 0.5),
                "width_factor": 0.0,
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_down_numeric_constant(self, data_format):
        input_image = np.arange(0, 25)
        # Shifting by .2 * 5 = 1 pixel.
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": (0.2, 0.2),
                "width_factor": 0.0,
                "fill_mode": "constant",
                "fill_value": 0.0,
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_left_numeric_reflect(self, data_format):
        input_image = np.arange(0, 25)
        # Shifting by .2 * 5 = 1 pixel.
        expected_output = np.asarray(
            [
                [1, 2, 3, 4, 4],
                [6, 7, 8, 9, 9],
                [11, 12, 13, 14, 14],
                [16, 17, 18, 19, 19],
                [21, 22, 23, 24, 24],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": 0.0,
                "width_factor": (-0.2, -0.2),
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters(["channels_first", "channels_last"])
    def test_random_translation_left_numeric_constant(self, data_format):
        input_image = np.arange(0, 25)
        # Shifting by .2 * 5 = 1 pixel.
        expected_output = np.asarray(
            [
                [1, 2, 3, 4, 0],
                [6, 7, 8, 9, 0],
                [11, 12, 13, 14, 0],
                [16, 17, 18, 19, 0],
                [21, 22, 23, 24, 0],
            ]
        )
        if data_format == "channels_last":
            input_image = np.reshape(input_image, (1, 5, 5, 1))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 5, 5, 1))
            )
        else:
            input_image = np.reshape(input_image, (1, 1, 5, 5))
            expected_output = backend.convert_to_tensor(
                np.reshape(expected_output, (1, 1, 5, 5))
            )
        self.run_layer_test(
            layers.RandomTranslation,
            init_kwargs={
                "height_factor": 0.0,
                "width_factor": (-0.2, -0.2),
                "fill_mode": "constant",
                "fill_value": 0.0,
                "data_format": data_format,
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    def test_tf_data_compatibility(self):
        # The layer must be usable inside a tf.data pipeline.
        layer = layers.RandomTranslation(0.2, 0.1)
        input_data = np.random.random((1, 4, 4, 3))
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(1).map(layer)
        next(iter(ds)).numpy()

    # NOTE(review): the two tests below exercise translation, not flips —
    # the "flip" in their names looks like a copy/paste leftover.
    @parameterized.named_parameters(
        (
            "with_positive_shift",
            [[1.0, 2.0]],
            [[3.0, 3.0, 5.0, 5.0], [7.0, 6.0, 8.0, 8.0]],
        ),
        (
            "with_negative_shift",
            [[-1.0, -2.0]],
            [[1.0, 0.0, 3.0, 1.0], [5.0, 2.0, 7.0, 4.0]],
        ),
    )
    def test_random_flip_bounding_boxes(self, translation, expected_boxes):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        random_translation_layer = layers.RandomTranslation(
            height_factor=0.5,
            width_factor=0.5,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        # A fixed transformation is injected so the expected boxes are
        # deterministic.
        transformation = {
            "translations": backend_utils.convert_tf_tensor(
                np.array(translation)
            ),
            "input_shape": image_shape,
        }
        output = random_translation_layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation=transformation,
            training=True,
        )

        self.assertAllClose(output["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "with_positive_shift",
            [[1.0, 2.0]],
            [[3.0, 3.0, 5.0, 5.0], [7.0, 6.0, 8.0, 8.0]],
        ),
        (
            "with_negative_shift",
            [[-1.0, -2.0]],
            [[1.0, 0.0, 3.0, 1.0], [5.0, 2.0, 7.0, 4.0]],
        ),
    )
    def test_random_flip_tf_data_bounding_boxes(
        self, translation, expected_boxes
    ):
        data_format = backend.config.image_data_format()
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}

        ds = tf_data.Dataset.from_tensor_slices(input_data)
        random_translation_layer = layers.RandomTranslation(
            height_factor=0.5,
            width_factor=0.5,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        transformation = {
            "translations": np.array(translation),
            "input_shape": image_shape,
        }

        ds = ds.map(
            lambda x: random_translation_layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )

        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_contrast_test.py | keras/src/layers/preprocessing/image_preprocessing/random_contrast_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomContrastTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomContrast`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Smoke tests in both data formats: layer builds and preserves
        # shape.
        self.run_layer_test(
            layers.RandomContrast,
            init_kwargs={
                "factor": 0.75,
                "value_range": (0, 255),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )
        self.run_layer_test(
            layers.RandomContrast,
            init_kwargs={
                "factor": 0.75,
                "value_range": (0, 255),
                "seed": 1,
                "data_format": "channels_first",
            },
            input_shape=(8, 3, 4, 4),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 4),
        )

    def test_random_contrast_with_value_range_0_to_255(self):
        seed = 9809
        np.random.seed(seed)
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((12, 8, 16, 3))
            height_axis = -3
            width_axis = -2
        else:
            inputs = np.random.random((12, 3, 8, 16))
            height_axis = -2
            width_axis = -1
        inputs = backend.convert_to_tensor(inputs, dtype="float32")
        layer = layers.RandomContrast(
            factor=0.5, value_range=(0, 255), seed=seed
        )
        transformation = layer.get_random_transformation(inputs, training=True)
        outputs = layer.transform_images(inputs, transformation, training=True)

        # Actual contrast arithmetic: scale deviations from the per-image
        # spatial mean by the sampled factor, then clip to the value range.
        np.random.seed(seed)
        factor = backend.convert_to_numpy(transformation["contrast_factor"])
        inputs = backend.convert_to_numpy(inputs)
        inp_mean = np.mean(inputs, axis=height_axis, keepdims=True)
        inp_mean = np.mean(inp_mean, axis=width_axis, keepdims=True)
        actual_outputs = (inputs - inp_mean) * factor + inp_mean
        outputs = backend.convert_to_numpy(outputs)
        actual_outputs = np.clip(actual_outputs, 0, 255)

        self.assertAllClose(outputs, actual_outputs)

    def test_random_contrast_with_value_range_0_to_1(self):
        seed = 9809
        np.random.seed(seed)
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((12, 8, 16, 3))
            height_axis = -3
            width_axis = -2
        else:
            inputs = np.random.random((12, 3, 8, 16))
            height_axis = -2
            width_axis = -1
        inputs = backend.convert_to_tensor(inputs, dtype="float32")
        layer = layers.RandomContrast(factor=0.5, value_range=(0, 1), seed=seed)
        transformation = layer.get_random_transformation(inputs, training=True)
        outputs = layer.transform_images(inputs, transformation, training=True)

        # Actual contrast arithmetic (same as above, clipped to [0, 1]).
        np.random.seed(seed)
        factor = backend.convert_to_numpy(transformation["contrast_factor"])
        inputs = backend.convert_to_numpy(inputs)
        inp_mean = np.mean(inputs, axis=height_axis, keepdims=True)
        inp_mean = np.mean(inp_mean, axis=width_axis, keepdims=True)
        actual_outputs = (inputs - inp_mean) * factor + inp_mean
        outputs = backend.convert_to_numpy(outputs)
        actual_outputs = np.clip(actual_outputs, 0, 1)

        self.assertAllClose(outputs, actual_outputs)

    def test_tf_data_compatibility(self):
        # The layer must be usable inside a tf.data pipeline.
        layer = layers.RandomContrast(factor=0.5, seed=1337)
        input_data = np.random.random((2, 8, 8, 3))
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        next(iter(ds)).numpy()

    def test_dict_input(self):
        # Non-image entries (labels, boxes) must pass through unchanged.
        layer = layers.RandomContrast(factor=0.1, bounding_box_format="xyxy")
        data = {
            "images": np.random.random((2, 4, 5, 3)),
            "labels": np.random.random((2, 7)),
            "segmentation_masks": np.random.random((2, 4, 5, 7)),
            "bounding_boxes": {
                "boxes": np.array([[1, 2, 2, 3]]),
                "labels": np.array([0]),
            },
        }
        transformed_data = layer(data)
        self.assertEqual(
            data["images"].shape[:-1],
            transformed_data["segmentation_masks"].shape[:-1],
        )
        self.assertAllClose(data["labels"], transformed_data["labels"])
        self.assertAllClose(
            data["bounding_boxes"]["boxes"],
            transformed_data["bounding_boxes"]["boxes"],
        )
        self.assertAllClose(
            data["bounding_boxes"]["labels"],
            transformed_data["bounding_boxes"]["labels"],
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_translation.py | keras/src/layers/preprocessing/image_preprocessing/random_translation.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandomTranslation")
class RandomTranslation(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly translates images during training.
This layer will apply random translations to each image during training,
filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in
an output height shifted by a random amount in the range
`[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting horizontally.
A negative value means shifting image left, while a positive value
means shifting image right. When represented as a single positive
float, this value is used for both the upper and lower bound. For
instance, `width_factor=(-0.2, 0.3)` results in an output shifted
left by 20%, and shifted right by 30%. `width_factor=0.2` results
in an output height shifted left or right by 20%.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Note that when using torch backend, `"reflect"` is redirected to
`"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
support `"reflect"`.
Note that torch backend does not support `"wrap"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
# This layer validates its own two factors (height/width), so the base
# class's single-factor handling is disabled.
_USE_BASE_FACTOR = False
# Shared prefix for all factor validation errors raised below.
_FACTOR_VALIDATION_ERROR = (
    "The `factor` argument should be a number (or a list of two numbers) "
    "in the range [-1.0, 1.0]. "
)
# Values accepted by the constructor's `fill_mode` / `interpolation`
# validation.
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="reflect",
        interpolation="bilinear",
        seed=None,
        fill_value=0.0,
        data_format=None,
        **kwargs,
    ):
        """Validate arguments and normalize translation factors.

        Factors are normalized to sorted `(lower, upper)` bounds; a scalar
        `f` becomes `(-|f|, |f|)`. Values outside `[-1.0, 1.0]` raise
        `ValueError`; unsupported `fill_mode`/`interpolation` raise
        `NotImplementedError`.
        """
        super().__init__(data_format=data_format, **kwargs)
        self.height_factor = height_factor
        self.height_lower, self.height_upper = self._set_factor(
            height_factor, "height_factor"
        )
        self.width_factor = width_factor
        self.width_lower, self.width_upper = self._set_factor(
            width_factor, "width_factor"
        )
        if fill_mode not in self._SUPPORTED_FILL_MODE:
            raise NotImplementedError(
                f"Unknown `fill_mode` {fill_mode}. Expected of one "
                f"{self._SUPPORTED_FILL_MODE}."
            )
        if interpolation not in self._SUPPORTED_INTERPOLATION:
            raise NotImplementedError(
                f"Unknown `interpolation` {interpolation}. Expected of one "
                f"{self._SUPPORTED_INTERPOLATION}."
            )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # affine_transform is not jit-compilable on all backends.
        self.supports_jit = False
def _set_factor(self, factor, factor_name):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
lower, upper = [-factor, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
return lower, upper
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training:
return self._translate_inputs(images, transformation)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def get_transformed_x_y(self, x, y, transform):
a0, a1, a2, b0, b1, b2, c0, c1 = self.backend.numpy.split(
transform, 8, axis=-1
)
k = c0 * x + c1 * y + 1
x_transformed = (a0 * x + a1 * y + a2) / k
y_transformed = (b0 * x + b1 * y + b2) / k
return x_transformed, y_transformed
def get_shifted_bbox(self, bounding_boxes, w_shift_factor, h_shift_factor):
bboxes = bounding_boxes["boxes"]
x1, x2, x3, x4 = self.backend.numpy.split(bboxes, 4, axis=-1)
w_shift_factor = self.backend.convert_to_tensor(
w_shift_factor, dtype=x1.dtype
)
h_shift_factor = self.backend.convert_to_tensor(
h_shift_factor, dtype=x1.dtype
)
if len(bboxes.shape) == 3:
w_shift_factor = self.backend.numpy.expand_dims(w_shift_factor, -1)
h_shift_factor = self.backend.numpy.expand_dims(h_shift_factor, -1)
bounding_boxes["boxes"] = self.backend.numpy.concatenate(
[
x1 - w_shift_factor,
x2 - h_shift_factor,
x3 - w_shift_factor,
x4 - h_shift_factor,
],
axis=-1,
)
return bounding_boxes
    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Shift bounding boxes by the sampled per-image translation.

        Boxes are converted to `"xyxy"`, shifted by the offset the affine
        transform applies to the origin, clipped to the image, and
        converted back to `self.bounding_box_format`.
        """
        if training:
            if backend_utils.in_tf_graph():
                # Inside a tf.data pipeline, force TensorFlow ops
                # regardless of the globally active backend.
                self.backend.set_backend("tensorflow")
            if self.data_format == "channels_first":
                height_axis = -2
                width_axis = -1
            else:
                height_axis = -3
                width_axis = -2
            input_height, input_width = (
                transformation["input_shape"][height_axis],
                transformation["input_shape"][width_axis],
            )
            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="xyxy",
                height=input_height,
                width=input_width,
            )
            translations = transformation["translations"]
            transform = self._get_translation_matrix(translations)
            # Where the transform maps the origin gives the pixel shift
            # to apply to every box coordinate.
            w_shift_factor, h_shift_factor = self.get_transformed_x_y(
                0, 0, transform
            )
            bounding_boxes = self.get_shifted_bbox(
                bounding_boxes, w_shift_factor, h_shift_factor
            )
            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=input_height,
                width=input_width,
                bounding_box_format="xyxy",
            )
            bounding_boxes = convert_format(
                bounding_boxes,
                source="xyxy",
                target=self.bounding_box_format,
                height=input_height,
                width=input_width,
            )
            # Undo the temporary backend switch made above, if any.
            self.backend.reset()
        return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
unbatched = len(images_shape) == 3
if unbatched:
images_shape = self.backend.shape(images)
batch_size = 1
else:
batch_size = images_shape[0]
if self.data_format == "channels_first":
height = images_shape[-2]
width = images_shape[-1]
else:
height = images_shape[-3]
width = images_shape[-2]
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
height_translate = self.backend.random.uniform(
minval=self.height_lower,
maxval=self.height_upper,
shape=[batch_size, 1],
seed=seed,
)
height_translate = self.backend.numpy.multiply(height_translate, height)
width_translate = self.backend.random.uniform(
minval=self.width_lower,
maxval=self.width_upper,
shape=[batch_size, 1],
seed=seed,
)
width_translate = self.backend.numpy.multiply(width_translate, width)
translations = self.backend.cast(
self.backend.numpy.concatenate(
[width_translate, height_translate], axis=1
),
dtype="float32",
)
return {"translations": translations, "input_shape": images_shape}
def _translate_inputs(self, inputs, transformation):
if transformation is None:
return inputs
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
translations = transformation["translations"]
outputs = self.backend.image.affine_transform(
inputs,
transform=self._get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
if unbatched:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def _get_translation_matrix(self, translations):
num_translations = self.backend.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# translation matrices are always float32.
return self.backend.numpy.concatenate(
[
self.backend.numpy.ones((num_translations, 1)),
self.backend.numpy.zeros((num_translations, 1)),
-translations[:, 0:1],
self.backend.numpy.zeros((num_translations, 1)),
self.backend.numpy.ones((num_translations, 1)),
-translations[:, 1:],
self.backend.numpy.zeros((num_translations, 2)),
],
axis=1,
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"interpolation": self.interpolation,
"seed": self.seed,
"fill_value": self.fill_value,
"data_format": self.data_format,
}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_zoom.py | keras/src/layers/preprocessing/image_preprocessing/random_zoom.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandomZoom")
class RandomZoom(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly zooms images during training.

    This layer will randomly zoom in or out on each axis of an image
    independently, filling empty space according to `fill_mode`.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    of integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`,
        in `"channels_last"` format,
        or `(..., channels, target_height, target_width)`,
        in `"channels_first"` format.

    Args:
        height_factor: a float represented as fraction of value, or a tuple of
            size 2 representing lower and upper bound for zooming vertically.
            When represented as a single float, this value is used for both the
            upper and lower bound. A positive value means zooming out, while a
            negative value means zooming in. For instance,
            `height_factor=(0.2, 0.3)` result in an output zoomed out by a
            random amount in the range `[+20%, +30%]`.
            `height_factor=(-0.3, -0.2)` result in an output zoomed in by a
            random amount in the range `[+20%, +30%]`.
        width_factor: a float represented as fraction of value, or a tuple of
            size 2 representing lower and upper bound for zooming horizontally.
            When represented as a single float, this value is used for both the
            upper and lower bound. For instance, `width_factor=(0.2, 0.3)`
            result in an output zooming out between 20% to 30%.
            `width_factor=(-0.3, -0.2)` result in an output zooming in between
            20% to 30%. `None` means zooming in both directions while
            preserving the aspect ratio. Defaults to `None`.
        fill_mode: Points outside the boundaries of the input are filled
            according to the given mode. Available methods are `"constant"`,
            `"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"reflect"`.
            - `"reflect"`: `(d c b a | a b c d | d c b a)`
                The input is extended by reflecting about the edge of the last
                pixel.
            - `"constant"`: `(k k k k | a b c d | k k k k)`
                The input is extended by filling all values beyond
                the edge with the same constant value k specified by
                `fill_value`.
            - `"wrap"`: `(a b c d | a b c d | a b c d)`
                The input is extended by wrapping around to the opposite edge.
            - `"nearest"`: `(a a a a | a b c d | d d d d)`
                The input is extended by the nearest pixel.
            Note that when using torch backend, `"reflect"` is redirected to
            `"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
            support `"reflect"`.
            Note that torch backend does not support `"wrap"`.
        interpolation: Interpolation mode. Supported values: `"nearest"`,
            `"bilinear"`.
        seed: Integer. Used to create a random seed.
        fill_value: a float that represents the value to be filled outside
            the boundaries when `fill_mode="constant"`.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.

    Example:

    >>> input_img = np.random.random((32, 224, 224, 3))
    >>> layer = keras.layers.RandomZoom(.5, .2)
    >>> out_img = layer(input_img)
    """

    # This layer validates and normalizes its two factors itself, so the
    # base class's generic single-`factor` handling is disabled.
    _USE_BASE_FACTOR = False
    _FACTOR_VALIDATION_ERROR = (
        "The `height_factor` and `width_factor` arguments "
        "should be a number (or a list of two numbers) "
        "in the range [-1.0, 1.0]. "
    )
    _SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
    _SUPPORTED_INTERPOLATION = ("nearest", "bilinear")

    def __init__(
        self,
        height_factor,
        width_factor=None,
        fill_mode="reflect",
        interpolation="bilinear",
        seed=None,
        fill_value=0.0,
        data_format=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.height_factor = height_factor
        self.height_lower, self.height_upper = self._set_factor(
            height_factor, "height_factor"
        )
        self.width_factor = width_factor
        # When `width_factor` is None, width bounds are intentionally left
        # unset; the width zoom then mirrors the height zoom (aspect ratio
        # is preserved) — see `get_random_transformation`.
        if width_factor is not None:
            self.width_lower, self.width_upper = self._set_factor(
                width_factor, "width_factor"
            )
        if fill_mode not in self._SUPPORTED_FILL_MODE:
            raise NotImplementedError(
                f"Unknown `fill_mode` {fill_mode}. Expected of one "
                f"{self._SUPPORTED_FILL_MODE}."
            )
        if interpolation not in self._SUPPORTED_INTERPOLATION:
            raise NotImplementedError(
                f"Unknown `interpolation` {interpolation}. Expected of one "
                f"{self._SUPPORTED_INTERPOLATION}."
            )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self.data_format = backend.standardize_data_format(data_format)
        # affine_transform is not jit-compilable on all backends.
        self.supports_jit = False

    def _set_factor(self, factor, factor_name):
        """Normalize `factor` into sorted `(lower, upper)` bounds."""
        if isinstance(factor, (tuple, list)):
            if len(factor) != 2:
                raise ValueError(
                    self._FACTOR_VALIDATION_ERROR
                    + f"Received: {factor_name}={factor}"
                )
            self._check_factor_range(factor[0])
            self._check_factor_range(factor[1])
            lower, upper = sorted(factor)
        elif isinstance(factor, (int, float)):
            self._check_factor_range(factor)
            factor = abs(factor)
            lower, upper = [-factor, factor]
        else:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: {factor_name}={factor}"
            )
        return lower, upper

    def _check_factor_range(self, input_number):
        """Raise `ValueError` if `input_number` is outside `[-1.0, 1.0]`."""
        if input_number > 1.0 or input_number < -1.0:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: input_number={input_number}"
            )

    def transform_images(self, images, transformation, training=True):
        """Zoom `images` when training; otherwise only cast dtype."""
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            return self._zoom_inputs(images, transformation)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by a spatial zoom; pass through."""
        return labels

    def get_transformed_x_y(self, x, y, transform):
        """Map point `(x, y)` through flat 8-parameter projective rows."""
        a0, a1, a2, b0, b1, b2, c0, c1 = self.backend.numpy.split(
            transform, 8, axis=-1
        )
        k = c0 * x + c1 * y + 1
        x_transformed = (a0 * x + a1 * y + a2) / k
        y_transformed = (b0 * x + b1 * y + b2) / k
        return x_transformed, y_transformed

    def get_clipped_bbox(self, bounding_boxes, h_end, h_start, w_end, w_start):
        """Clip boxes to the zoomed window and shift to its origin."""
        bboxes = bounding_boxes["boxes"]
        x1, y1, x2, y2 = self.backend.numpy.split(bboxes, 4, axis=-1)
        if len(bboxes.shape) == 3:
            # Batched boxes: add a trailing axis so the bounds broadcast.
            h_end = self.backend.numpy.expand_dims(h_end, -1)
            h_start = self.backend.numpy.expand_dims(h_start, -1)
            w_end = self.backend.numpy.expand_dims(w_end, -1)
            w_start = self.backend.numpy.expand_dims(w_start, -1)
        x1 = self.backend.numpy.clip(x1, w_start, w_end) - w_start
        y1 = self.backend.numpy.clip(y1, h_start, h_end) - h_start
        x2 = self.backend.numpy.clip(x2, w_start, w_end) - w_start
        y2 = self.backend.numpy.clip(y2, h_start, h_end) - h_start
        bounding_boxes["boxes"] = self.backend.numpy.concatenate(
            [x1, y1, x2, y2], axis=-1
        )
        return bounding_boxes

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Zoom bounding boxes consistently with the image transform."""
        if training:
            if backend_utils.in_tf_graph():
                # Inside a tf.data pipeline, force TensorFlow ops
                # regardless of the globally active backend.
                self.backend.set_backend("tensorflow")
            width_zoom = transformation["width_zoom"]
            height_zoom = transformation["height_zoom"]
            inputs_shape = transformation["input_shape"]
            if self.data_format == "channels_first":
                height = inputs_shape[-2]
                width = inputs_shape[-1]
            else:
                height = inputs_shape[-3]
                width = inputs_shape[-2]
            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="xyxy",
                height=height,
                width=width,
            )
            zooms = self.backend.cast(
                self.backend.numpy.concatenate(
                    [width_zoom, height_zoom], axis=1
                ),
                dtype="float32",
            )
            transform = self._get_zoom_matrix(zooms, height, width)
            # Map the image corners through the zoom to find the visible
            # window in source coordinates.
            w_start, h_start = self.get_transformed_x_y(
                0,
                0,
                transform,
            )
            w_end, h_end = self.get_transformed_x_y(
                width,
                height,
                transform,
            )
            bounding_boxes = self.get_clipped_bbox(
                bounding_boxes, h_end, h_start, w_end, w_start
            )
            height_transformed = h_end - h_start
            width_transformed = w_end - w_start
            height_transformed = self.backend.numpy.expand_dims(
                height_transformed, -1
            )
            width_transformed = self.backend.numpy.expand_dims(
                width_transformed, -1
            )
            bounding_boxes = convert_format(
                bounding_boxes,
                source="xyxy",
                target="rel_xyxy",
                height=height_transformed,
                width=width_transformed,
            )
            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=height_transformed,
                width=width_transformed,
                bounding_box_format="rel_xyxy",
            )
            bounding_boxes = convert_format(
                bounding_boxes,
                source="rel_xyxy",
                target=self.bounding_box_format,
                height=height,
                width=width,
            )
            # Undo the temporary backend switch made above, if any.
            self.backend.reset()
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Masks move with the image, so reuse the image transform."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample per-image zoom factors.

        Returns a dict with `"height_zoom"` and `"width_zoom"` of shape
        `(batch_size, 1)` (values > 1 zoom out, < 1 zoom in) plus
        `"input_shape"`, or `None` when not training.
        """
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        if len(images_shape) == 4:
            zoom_factor_shape = (images_shape[0], 1)
        else:
            zoom_factor_shape = (1, 1)
        # NOTE: an unreachable second `if not training:` branch was removed
        # here — the method already returns `None` above for that case, and
        # the dead branch returned a dict missing `"input_shape"`.
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        height_zoom = self.backend.random.uniform(
            minval=1.0 + self.height_lower,
            maxval=1.0 + self.height_upper,
            shape=zoom_factor_shape,
            seed=seed,
        )
        if self.width_factor is not None:
            width_zoom = self.backend.random.uniform(
                minval=1.0 + self.width_lower,
                maxval=1.0 + self.width_upper,
                shape=zoom_factor_shape,
                seed=seed,
            )
        else:
            # Preserve the aspect ratio when no width factor is given.
            width_zoom = height_zoom
        return {
            "height_zoom": height_zoom,
            "width_zoom": width_zoom,
            "input_shape": images_shape,
        }

    def _zoom_inputs(self, inputs, transformation):
        """Apply the sampled zoom via the backend affine transform."""
        if transformation is None:
            return inputs
        width_zoom = transformation["width_zoom"]
        height_zoom = transformation["height_zoom"]
        zooms = self.backend.cast(
            self.backend.numpy.concatenate([width_zoom, height_zoom], axis=1),
            dtype="float32",
        )
        inputs_shape = self.backend.shape(inputs)
        unbatched = len(inputs_shape) == 3
        if unbatched:
            inputs = self.backend.numpy.expand_dims(inputs, axis=0)
            inputs_shape = self.backend.shape(inputs)
        if self.data_format == "channels_first":
            height = inputs_shape[-2]
            width = inputs_shape[-1]
        else:
            height = inputs_shape[-3]
            width = inputs_shape[-2]
        outputs = self.backend.image.affine_transform(
            inputs,
            transform=self._get_zoom_matrix(zooms, height, width),
            interpolation=self.interpolation,
            fill_mode=self.fill_mode,
            fill_value=self.fill_value,
            data_format=self.data_format,
        )
        if unbatched:
            outputs = self.backend.numpy.squeeze(outputs, axis=0)
        return outputs

    def _get_zoom_matrix(self, zooms, image_height, image_width):
        """Build flat affine matrices that zoom about the image center."""
        num_zooms = self.backend.shape(zooms)[0]
        # The zoom matrix looks like:
        # [[zx 0 0]
        #  [0 zy 0]
        #  [0 0 1]]
        # where the last entry is implicit.
        # zoom matrices are always float32.
        x_offset = ((self.backend.cast(image_width, "float32") - 1.0) / 2.0) * (
            1.0 - zooms[:, 0:1]
        )
        y_offset = (
            (self.backend.cast(image_height, "float32") - 1.0) / 2.0
        ) * (1.0 - zooms[:, 1:])
        return self.backend.numpy.concatenate(
            [
                zooms[:, 0:1],
                self.backend.numpy.zeros((num_zooms, 1)),
                x_offset,
                self.backend.numpy.zeros((num_zooms, 1)),
                zooms[:, 1:],
                y_offset,
                self.backend.numpy.zeros((num_zooms, 2)),
            ],
            axis=1,
        )

    def compute_output_shape(self, input_shape):
        """Zooming preserves tensor dimensions; shape is unchanged."""
        return input_shape

    def get_config(self):
        """Return the serializable config, merged over the base config."""
        base_config = super().get_config()
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "interpolation": self.interpolation,
            "seed": self.seed,
            "fill_value": self.fill_value,
            "data_format": self.data_format,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_perspective_test.py | keras/src/layers/preprocessing/image_preprocessing/random_perspective_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomPerspectiveTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomPerspective`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Generic layer checks (serialization, dtype, output shape).
        self.run_layer_test(
            layers.RandomPerspective,
            init_kwargs={
                "factor": 1.0,
                "scale": 0.5,
                "interpolation": "bilinear",
                "fill_value": 0,
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_perspective_inference(self):
        # In inference mode the layer must be a no-op.
        seed = 3481
        layer = layers.RandomPerspective()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_perspective_no_op(self):
        # With factor=0 the image must be unchanged even in training mode.
        seed = 3481
        layer = layers.RandomPerspective(factor=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    def test_random_perspective_basic(self):
        # A fixed transformation that shrinks the source square so only
        # the top-left 2x2 region keeps source pixels.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.ones((4, 4, 1))
            expected_output = np.asarray(
                [
                    [[1.0], [1.0], [0.0], [0.0]],
                    [[1.0], [1.0], [0.0], [0.0]],
                    [[0.0], [0.0], [0.0], [0.0]],
                    [[0.0], [0.0], [0.0], [0.0]],
                ],
            )
        else:
            inputs = np.ones((1, 4, 4))
            expected_output = np.array(
                [
                    [
                        [1.0, 1.0, 0.0, 0.0],
                        [1.0, 1.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0],
                    ]
                ]
            )
        layer = layers.RandomPerspective(data_format=data_format)
        transformation = {
            "apply_perspective": np.array([True]),
            "start_points": np.array(
                [[[0.0, 0.0], [3.0, 0.0], [0.0, 3.0], [3.0, 3.0]]]
            ),
            "end_points": np.array([[[0.0, 0.0], [1, 0.0], [0.0, 1], [1, 1]]]),
            "input_shape": np.array((4, 4, 1)),
        }
        output = layer.transform_images(inputs, transformation)
        self.assertAllClose(expected_output, output, atol=1e-4, rtol=1e-4)

    def test_tf_data_compatibility(self):
        # The layer must run inside a tf.data pipeline on any backend.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomPerspective(data_format=data_format)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()

    @parameterized.named_parameters(
        (
            "with_large_scale",
            [
                [
                    [0.0, 0.0],
                    [8.151311, 0.0],
                    [0.0, 12.695701],
                    [9.2712054, 10.524198],
                ]
            ],
            [
                [
                    [2.6490488, 1.1149256, 5.2026834, 3.6187303],
                    [7.5547166, 4.2492595, 8.0, 6.869391],
                ]
            ],
        ),
        (
            "with_small_scale",
            [
                [
                    [0.0, 0.0],
                    [4.151311, 0.0],
                    [0.0, 6.695701],
                    [4.2712054, 7.524198],
                ]
            ],
            [
                [
                    [1.095408, 0.7504317, 2.2761598, 2.3389952],
                    [3.5416048, 3.2349987, 4.920989, 5.0568376],
                ]
            ],
        ),
    )
    def test_random_perspective_bounding_boxes(
        self, end_points, expected_boxes
    ):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        layer = layers.RandomPerspective(
            # NOTE(review): `data_format` is computed above but not passed
            # here (the kwarg below is commented out) — confirm whether it
            # should be restored; the fixtures were recorded without it.
            # data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "apply_perspective": np.array([True]),
            "end_points": np.array(end_points),
            "input_shape": np.array(image_shape),
            "start_points": np.array(
                [[[0.0, 0.0], [7.0, 0.0], [0.0, 9.0], [7.0, 9.0]]]
            ),
        }
        output = layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation,
        )
        self.assertAllClose(
            output["boxes"],
            expected_boxes,
            atol=1e-3,
            rtol=1e-3,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )

    @parameterized.named_parameters(
        (
            "with_large_scale",
            [
                [
                    [0.0, 0.0],
                    [8.151311, 0.0],
                    [0.0, 12.695701],
                    [9.2712054, 10.524198],
                ]
            ],
            [
                [
                    [2.6490488, 1.1149256, 5.2026834, 3.6187303],
                    [7.5547166, 4.2492595, 8.0, 6.869391],
                ]
            ],
        ),
        (
            "with_small_scale",
            [
                [
                    [0.0, 0.0],
                    [4.151311, 0.0],
                    [0.0, 6.695701],
                    [4.2712054, 7.524198],
                ]
            ],
            [
                [
                    [1.095408, 0.7504317, 2.2761598, 2.3389952],
                    [3.5416048, 3.2349987, 4.920989, 5.0568376],
                ]
            ],
        ),
    )
    # NOTE(review): the name says "random_flip" — likely a copy-paste
    # leftover from random_flip_test; consider renaming to
    # test_random_perspective_tf_data_bounding_boxes.
    def test_random_flip_tf_data_bounding_boxes(
        self, end_points, expected_boxes
    ):
        # NOTE(review): `image_data_format()` is queried twice here; the
        # second call could reuse `data_format`.
        data_format = backend.config.image_data_format()
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [
                            [2, 1, 4, 3],
                            [6, 4, 8, 6],
                        ]
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        layer = layers.RandomPerspective(
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "apply_perspective": np.array([True]),
            "end_points": np.array(end_points),
            "input_shape": np.array(image_shape),
            "start_points": np.array(
                [[[0.0, 0.0], [7.0, 0.0], [0.0, 9.0], [7.0, 9.0]]]
            ),
        }
        ds = ds.map(
            lambda x: layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )
        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(
            output["boxes"], expected_boxes, atol=1e-3, rtol=1e-3
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur_test.py | keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.backend import convert_to_tensor
class RandomGaussianBlurTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomGaussianBlur`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Generic layer checks (serialization, dtype, output shape).
        self.run_layer_test(
            layers.RandomGaussianBlur,
            init_kwargs={
                "factor": 1.0,
                "kernel_size": 3,
                "sigma": 0,
                "value_range": (0, 255),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    # NOTE(review): method name says "random_erasing" — copy-paste leftover
    # from random_erasing_test; consider renaming to
    # test_random_gaussian_blur_inference.
    def test_random_erasing_inference(self):
        # In inference mode the layer must be a no-op.
        seed = 3481
        layer = layers.RandomGaussianBlur()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    # NOTE(review): same copy-paste naming as above.
    def test_random_erasing_no_op(self):
        # With factor=0 the image must be unchanged even in training mode.
        seed = 3481
        layer = layers.RandomGaussianBlur(factor=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    # NOTE(review): same copy-paste naming as above.
    def test_random_erasing_basic(self):
        # A fixed blur transformation over an all-ones image should produce
        # a uniform blurred value (~0.7273 with the default kernel).
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.ones((1, 2, 2, 3))
            expected_output = np.asarray(
                [
                    [
                        [[0.7273, 0.7273, 0.7273], [0.7273, 0.7273, 0.7273]],
                        [[0.7273, 0.7273, 0.7273], [0.7273, 0.7273, 0.7273]],
                    ]
                ]
            )
        else:
            inputs = np.ones((1, 3, 2, 2))
            expected_output = np.asarray(
                [
                    [
                        [[0.7273, 0.7273], [0.7273, 0.7273]],
                        [[0.7273, 0.7273], [0.7273, 0.7273]],
                        [[0.7273, 0.7273], [0.7273, 0.7273]],
                    ]
                ]
            )
        layer = layers.RandomGaussianBlur(data_format=data_format)
        transformation = {
            "blur_factor": convert_to_tensor([0.3732, 0.8654]),
            "should_apply_blur": convert_to_tensor([True]),
        }
        output = layer.transform_images(inputs, transformation)
        self.assertAllClose(
            expected_output,
            output,
            atol=1e-4,
            rtol=1e-4,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )

    def test_tf_data_compatibility(self):
        # The layer must run inside a tf.data pipeline on any backend.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomGaussianBlur(data_format=data_format)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_rotation.py | keras/src/layers/preprocessing/image_preprocessing/random_rotation.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (
converters,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomRotation")
class RandomRotation(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, pass `training=True` when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating
counter clock-wise,
while a negative value means clock-wise.
When represented as a single
float, this value is used for both the upper and lower bound.
For instance, `factor=(-0.2, 0.3)`
results in an output rotation by a random
amount in the range `[-20% * 360, 30% * 360]`.
`factor=0.2` results in an
output rotating by a random amount
in the range `[-20% * 360, 20% * 360]`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about
the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by
filling all values beyond the edge with
the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
"""
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
    def __init__(
        self,
        factor,
        fill_mode="reflect",
        interpolation="bilinear",
        seed=None,
        fill_value=0.0,
        data_format=None,
        **kwargs,
    ):
        """Validate arguments and set up the rotation layer.

        Unlike the translation/zoom layers, `factor` handling is delegated
        to the base class (passed via `super().__init__`).
        """
        super().__init__(factor=factor, data_format=data_format, **kwargs)
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self.fill_mode = fill_mode
        self.interpolation = interpolation
        self.fill_value = fill_value
        # affine_transform is not jit-compilable on all backends.
        self.supports_jit = False
        if self.fill_mode not in self._SUPPORTED_FILL_MODE:
            raise NotImplementedError(
                f"Unknown `fill_mode` {fill_mode}. Expected of one "
                f"{self._SUPPORTED_FILL_MODE}."
            )
        if self.interpolation not in self._SUPPORTED_INTERPOLATION:
            raise NotImplementedError(
                f"Unknown `interpolation` {interpolation}. Expected of one "
                f"{self._SUPPORTED_INTERPOLATION}."
            )
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training:
return self.backend.image.affine_transform(
images=images,
transform=transformation["rotation_matrix"],
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Rotate bounding boxes by the same angles applied to the images."""
        if training:
            ops = self.backend
            boxes = bounding_boxes["boxes"]
            height = transformation["image_height"]
            width = transformation["image_width"]
            batch_size = transformation["batch_size"]
            # Pure rotation: translation/shear are zero, scale is one.
            boxes = converters.affine_transform(
                boxes=boxes,
                angle=transformation["angle"],
                translate_x=ops.numpy.zeros([batch_size]),
                translate_y=ops.numpy.zeros([batch_size]),
                scale=ops.numpy.ones([batch_size]),
                shear_x=ops.numpy.zeros([batch_size]),
                shear_y=ops.numpy.zeros([batch_size]),
                height=height,
                width=width,
            )
            bounding_boxes["boxes"] = boxes
            # NOTE(review): boxes are assumed to be in "xyxy" here before
            # clipping — confirm against `converters.affine_transform`.
            bounding_boxes = converters.clip_to_image_size(
                bounding_boxes,
                height=height,
                width=width,
                bounding_box_format="xyxy",
            )
            # Convert back to the user-facing bounding box format.
            bounding_boxes = converters.convert_format(
                bounding_boxes,
                source="xyxy",
                target=self.bounding_box_format,
                height=height,
                width=width,
            )
        return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
    def get_random_transformation(self, data, training=True, seed=None):
        """Sample per-image rotation angles and build affine matrices.

        Returns a dict with the sampled ``angle`` (degrees), the matching
        ``rotation_matrix``, the image dimensions, and the batch size, or
        ``None`` when not training.
        """
        ops = self.backend
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        shape = ops.core.shape(images)
        if len(shape) == 4:
            batch_size = shape[0]
            if self.data_format == "channels_last":
                image_height = shape[1]
                image_width = shape[2]
            else:
                image_height = shape[2]
                image_width = shape[3]
        else:
            # Unbatched (rank-3) input: treat as a batch of one.
            batch_size = 1
            if self.data_format == "channels_last":
                image_height = shape[0]
                image_width = shape[1]
            else:
                image_height = shape[1]
                image_width = shape[2]
        if seed is None:
            seed = self._get_seed_generator(ops._backend)
        # `factor` is a fraction of a full turn; convert to degrees.
        lower = self.factor[0] * 360.0
        upper = self.factor[1] * 360.0
        angle = ops.random.uniform(
            shape=(batch_size,),
            minval=lower,
            maxval=upper,
            seed=seed,
        )
        # Rotate about the image center (relative coordinates).
        center_x, center_y = 0.5, 0.5
        rotation_matrix = self._compute_affine_matrix(
            center_x=center_x,
            center_y=center_y,
            angle=angle,
            translate_x=ops.numpy.zeros([batch_size]),
            translate_y=ops.numpy.zeros([batch_size]),
            scale=ops.numpy.ones([batch_size]),
            shear_x=ops.numpy.zeros([batch_size]),
            shear_y=ops.numpy.zeros([batch_size]),
            height=image_height,
            width=image_width,
        )
        if len(shape) == 3:
            # Drop the synthetic batch dimension for unbatched inputs.
            rotation_matrix = self.backend.numpy.squeeze(
                rotation_matrix, axis=0
            )
        return {
            "angle": angle,
            "rotation_matrix": rotation_matrix,
            "image_height": image_height,
            "image_width": image_width,
            "batch_size": batch_size,
        }
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"data_format": self.data_format,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/aug_mix_test.py | keras/src/layers/preprocessing/image_preprocessing/aug_mix_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandAugmentTest(testing.TestCase):
    """Tests for `keras.layers.AugMix`.

    NOTE(review): every test here targets `layers.AugMix`, but the class is
    named `RandAugmentTest` — presumably a copy-paste leftover; consider
    renaming to `AugMixTest`.
    """

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard Keras layer contract checks (serialization, shapes, ...).
        self.run_layer_test(
            layers.AugMix,
            init_kwargs={
                "value_range": (0, 255),
                "num_chains": 2,
                "chain_depth": 2,
                "factor": 1,
                "alpha": 1.0,
                "all_ops": True,
                "interpolation": "nearest",
                "seed": 43,
                "data_format": "channels_last",
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_aug_mix_inference(self):
        # With training=False the layer must be an identity op.
        seed = 3481
        layer = layers.AugMix()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_augment_randomness(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        # Many chains make it overwhelmingly likely the output differs.
        layer = layers.AugMix(
            num_chains=11, all_ops=True, data_format=data_format
        )
        augmented_image = layer(input_data)
        self.assertNotAllClose(
            backend.convert_to_numpy(augmented_image), input_data
        )

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.AugMix(data_format=data_format)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            # Materialize to ensure the mapped dataset actually executes.
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py | keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.RandomColorDegeneration")
class RandomColorDegeneration(BaseImagePreprocessingLayer):
    """Randomly performs the color degeneration operation on given images.

    The color degeneration operation first converts an image to gray scale,
    then back to color. It then takes a weighted average between original
    image and the degenerated image. This makes colors appear more dull.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: A tuple of two floats or a single float.
            `factor` controls the extent to which the
            image sharpness is impacted. `factor=0.0` makes this layer perform
            a no-op operation, while a value of 1.0 uses the degenerated
            result entirely. Values between 0 and 1 result in linear
            interpolation between the original image and the degenerated
            image. Values should be between `0.0` and `1.0`. If a tuple is
            used, a `factor` is sampled between the two values for every image
            augmented. If a single float is used, a value between `0.0` and
            the passed float is sampled. In order to ensure the value is
            always the same, please pass a tuple with two identical floats:
            `(0.5, 0.5)`.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`.
            Defaults to `(0, 255)`.
        seed: Integer. Used to create a random seed.
    """

    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(
        self,
        factor,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self._set_value_range(value_range)
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def _set_value_range(self, value_range):
        """Validate and store `value_range` as a sorted two-element list."""
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one blend factor per image, broadcastable over H/W/C."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received: "
                f"inputs.shape={images_shape}"
            )
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # NOTE(review): for rank-3 inputs the (1, 1, 1, 1) factor broadcasts
        # against a rank-3 image — confirm the base layer batches inputs
        # before `transform_images` is called.
        factor = self.backend.random.uniform(
            (batch_size, 1, 1, 1),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        # (Removed a dead `factor = factor` self-assignment.)
        return {"factor": factor}

    def transform_images(self, images, transformation=None, training=True):
        """Blend each image with its grayscale version by the sampled factor."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            factor = self.backend.cast(
                transformation["factor"], self.compute_dtype
            )
            degenerates = self.backend.image.rgb_to_grayscale(
                images, data_format=self.data_format
            )
            # Linear blend: factor=0 keeps the input, factor=1 is fully gray.
            images = images + factor * (degenerates - images)
            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
            # (Removed a redundant trailing cast: all operands above are
            # already in `compute_dtype`.)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by color degeneration."""
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Masks are unaffected by color degeneration."""
        return segmentation_masks

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Bounding boxes are unaffected by color degeneration."""
        return bounding_boxes

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "factor": self.factor,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_flip.py | keras/src/layers/preprocessing/image_preprocessing/random_flip.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
@keras_export("keras.layers.RandomFlip")
class RandomFlip(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly flips images during training.

    This layer will flip the images horizontally and or vertically based on
    the `mode` attribute. During inference time, the output will be identical
    to input. Call the layer with `training=True` to flip the input.
    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
    and of integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Args:
        mode: String indicating which flip mode to use. Can be `"horizontal"`,
            `"vertical"`, or `"horizontal_and_vertical"`. `"horizontal"` is a
            left-right flip and `"vertical"` is a top-bottom flip. Defaults to
            `"horizontal_and_vertical"`
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
    """

    _USE_BASE_FACTOR = False

    def __init__(
        self,
        mode=HORIZONTAL_AND_VERTICAL,
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self.mode = mode
        # Inputs may be dicts (images + boxes); skip automatic conversion
        # of positional args to tensors.
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True

    def get_random_transformation(self, data, training=True, seed=None):
        """Draw one Bernoulli(0.5) flip decision per image."""
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        shape = self.backend.core.shape(images)
        if len(shape) == 3:
            flips_shape = (1, 1, 1)
        else:
            flips_shape = (shape[0], 1, 1, 1)
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # Flip when the uniform sample is <= 0.5.
        flips = self.backend.numpy.less_equal(
            self.backend.random.uniform(shape=flips_shape, seed=seed), 0.5
        )
        return {"flips": flips, "input_shape": shape}

    def transform_images(self, images, transformation, training=True):
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            return self._flip_inputs(images, transformation)
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Labels are invariant under flipping.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Mirror bounding boxes for the images that were flipped."""

        def _flip_boxes_horizontal(boxes):
            # Boxes are in rel_xyxy (0..1): mirroring maps x -> 1 - x and
            # swaps the roles of the left/right edges.
            x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
            outputs = self.backend.numpy.concatenate(
                [1 - x3, x2, 1 - x1, x4], axis=-1
            )
            return outputs

        def _flip_boxes_vertical(boxes):
            # Same idea for the top/bottom edges.
            x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
            outputs = self.backend.numpy.concatenate(
                [x1, 1 - x4, x3, 1 - x2], axis=-1
            )
            return outputs

        def _transform_xyxy(boxes, box_flips):
            # Apply the same per-image flip decisions used for the pixels.
            bboxes = boxes["boxes"]
            if self.mode in {HORIZONTAL, HORIZONTAL_AND_VERTICAL}:
                bboxes = self.backend.numpy.where(
                    box_flips,
                    _flip_boxes_horizontal(bboxes),
                    bboxes,
                )
            if self.mode in {VERTICAL, HORIZONTAL_AND_VERTICAL}:
                bboxes = self.backend.numpy.where(
                    box_flips,
                    _flip_boxes_vertical(bboxes),
                    bboxes,
                )
            return bboxes

        if training:
            # Inside a tf.data map, force the tensorflow backend ops.
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")

            flips = self.backend.numpy.squeeze(transformation["flips"], axis=-1)

            if self.data_format == "channels_first":
                height_axis = -2
                width_axis = -1
            else:
                height_axis = -3
                width_axis = -2

            input_height, input_width = (
                transformation["input_shape"][height_axis],
                transformation["input_shape"][width_axis],
            )

            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="rel_xyxy",
                height=input_height,
                width=input_width,
            )

            bounding_boxes["boxes"] = _transform_xyxy(bounding_boxes, flips)

            # NOTE(review): boxes are in rel_xyxy here but clipping is
            # requested with `bounding_box_format="xyxy"` — confirm this is
            # intentional.
            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=input_height,
                width=input_width,
                bounding_box_format="xyxy",
            )

            bounding_boxes = convert_format(
                bounding_boxes,
                source="rel_xyxy",
                target=self.bounding_box_format,
                height=input_height,
                width=input_width,
            )

            # Undo the temporary backend switch.
            self.backend.reset()

        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks get the identical spatial transform as images.
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def _flip_inputs(self, inputs, transformation):
        """Flip pixel data according to per-image flip decisions."""
        if transformation is None:
            return inputs

        flips = transformation["flips"]
        inputs_shape = self.backend.shape(inputs)
        unbatched = len(inputs_shape) == 3
        if unbatched:
            # Add a batch axis so the (N, 1, 1, 1) flip mask broadcasts.
            inputs = self.backend.numpy.expand_dims(inputs, axis=0)

        flipped_outputs = inputs
        if self.data_format == "channels_last":
            horizontal_axis = -2
            vertical_axis = -3
        else:
            horizontal_axis = -1
            vertical_axis = -2

        if self.mode == HORIZONTAL or self.mode == HORIZONTAL_AND_VERTICAL:
            flipped_outputs = self.backend.numpy.where(
                flips,
                self.backend.numpy.flip(flipped_outputs, axis=horizontal_axis),
                flipped_outputs,
            )
        if self.mode == VERTICAL or self.mode == HORIZONTAL_AND_VERTICAL:
            flipped_outputs = self.backend.numpy.where(
                flips,
                self.backend.numpy.flip(flipped_outputs, axis=vertical_axis),
                flipped_outputs,
            )
        if unbatched:
            flipped_outputs = self.backend.numpy.squeeze(
                flipped_outputs, axis=0
            )
        return flipped_outputs

    def compute_output_shape(self, input_shape):
        # Flipping preserves shape.
        return input_shape

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "seed": self.seed,
                "mode": self.mode,
                "data_format": self.data_format,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py | keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.RandomSharpness")
class RandomSharpness(BaseImagePreprocessingLayer):
    """Randomly performs the sharpness operation on given images.

    The sharpness operation first performs a blur, then blends between the
    original image and the processed image. This operation adjusts the clarity
    of the edges in an image, ranging from blurred to enhanced sharpness.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: A tuple of two floats or a single float.
            `factor` controls the extent to which the image sharpness
            is impacted. `factor=0.0` results in a fully blurred image,
            `factor=0.5` applies no operation (preserving the original image),
            and `factor=1.0` enhances the sharpness beyond the original.
            Values should be between `0.0` and `1.0`. If a tuple is used, a
            `factor` is sampled between the two values for every image
            augmented. If a single float is used, a value between `0.0` and
            the passed float is sampled. To ensure the value is always the
            same, pass a tuple with two identical floats: `(0.5, 0.5)`.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`. This is
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)

    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(
        self,
        factor,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self._set_value_range(value_range)
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def _set_value_range(self, value_range):
        """Validate and store `value_range` as a sorted two-element list."""
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one sharpness factor per image."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received: "
                f"inputs.shape={images_shape}"
            )
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        factor = self.backend.random.uniform(
            (batch_size,),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        return {"factor": factor}

    def transform_images(self, images, transformation=None, training=True):
        """Blur with a fixed 3x3 kernel, then blend with the original."""
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            # Work internally in channels_last; swap back at the end.
            if self.data_format == "channels_first":
                images = self.backend.numpy.swapaxes(images, -3, -1)

            # Scale factor to [0, 2] so 0.5 maps to 1.0 (identity blend).
            sharpness_factor = self.backend.cast(
                transformation["factor"] * 2, dtype=self.compute_dtype
            )
            sharpness_factor = self.backend.numpy.reshape(
                sharpness_factor, (-1, 1, 1, 1)
            )

            num_channels = self.backend.shape(images)[-1]

            # Smoothing kernel: edges weighted 1/13, center 5/13 (sums to 1).
            a, b = 1.0 / 13.0, 5.0 / 13.0
            kernel = self.backend.convert_to_tensor(
                [[a, a, a], [a, b, a], [a, a, a]], dtype=self.compute_dtype
            )
            kernel = self.backend.numpy.reshape(kernel, (3, 3, 1, 1))
            # One identical kernel per channel (depthwise).
            kernel = self.backend.numpy.tile(kernel, [1, 1, num_channels, 1])
            kernel = self.backend.cast(kernel, self.compute_dtype)

            smoothed_image = self.backend.nn.depthwise_conv(
                images,
                kernel,
                strides=1,
                padding="same",
                data_format="channels_last",
            )

            smoothed_image = self.backend.cast(
                smoothed_image, dtype=self.compute_dtype
            )
            # Blend: sharpness_factor=1 is identity, <1 blurs, >1 sharpens.
            images = images + (1.0 - sharpness_factor) * (
                smoothed_image - images
            )

            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )

            if self.data_format == "channels_first":
                images = self.backend.numpy.swapaxes(images, -3, -1)

        return images

    def transform_labels(self, labels, transformation, training=True):
        # Labels are unaffected by sharpness adjustment.
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Sharpness is a pixel-value op; masks pass through unchanged.
        return segmentation_masks

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        # Sharpness is a pixel-value op; boxes pass through unchanged.
        return bounding_boxes

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "factor": self.factor,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/rand_augment_test.py | keras/src/layers/preprocessing/image_preprocessing/rand_augment_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandAugmentTest(testing.TestCase):
    """Tests for `keras.layers.RandAugment`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard Keras layer contract checks (serialization, shapes, ...).
        self.run_layer_test(
            layers.RandAugment,
            init_kwargs={
                "value_range": (0, 255),
                "num_ops": 2,
                "factor": 1,
                "interpolation": "nearest",
                "seed": 1,
                "data_format": "channels_last",
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_rand_augment_inference(self):
        # With training=False the layer must be an identity op.
        seed = 3481
        layer = layers.RandAugment()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_rand_augment_basic(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandAugment(data_format=data_format)

        augmented_image = layer(input_data)
        self.assertEqual(augmented_image.shape, input_data.shape)

    def test_rand_augment_no_operations(self):
        # num_ops=0 should leave the input untouched.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandAugment(num_ops=0, data_format=data_format)

        augmented_image = layer(input_data)
        self.assertAllClose(
            backend.convert_to_numpy(augmented_image), input_data
        )

    def test_random_augment_randomness(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))

        # Many ops make it overwhelmingly likely the output differs.
        layer = layers.RandAugment(num_ops=11, data_format=data_format)
        augmented_image = layer(input_data)
        self.assertNotAllClose(
            backend.convert_to_numpy(augmented_image), input_data
        )

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandAugment(data_format=data_format)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()

    def test_rand_augment_tf_data_bounding_boxes(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}

        ds = tf_data.Dataset.from_tensor_slices(input_data)
        layer = layers.RandAugment(
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        # Smoke test: mapping must not raise inside a tf.data graph.
        ds.map(layer)

    def test_graph_issue(self):
        # Regression test: transformations sampled inside a tf.data graph
        # must differ across batches (randomness must not be frozen).
        input_data = np.random.random((10, 8, 8, 3))
        layer = layers.RandAugment()
        ds = (
            tf_data.Dataset.from_tensor_slices(input_data)
            .batch(2)
            .map(lambda x: layer.get_random_transformation(x))
        )
        key_list = []
        for output in ds:
            key_list.append(output["layer_idxes"])
        self.assertNotEqual(len(np.unique(key_list)), 1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_sharpness_test.py | keras/src/layers/preprocessing/image_preprocessing/random_sharpness_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomSharpnessTest(testing.TestCase):
    """Tests for `keras.layers.RandomSharpness`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomSharpness,
            init_kwargs={
                "factor": 0.75,
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_sharpness_value_range(self):
        # Output must stay within the default [0, 1] of the input.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)

        layer = layers.RandomSharpness(0.2)
        adjusted_image = layer(image)

        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))

    def test_random_sharpness_no_op(self):
        # factor=(0.5, 0.5) is the identity blend.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))

        layer = layers.RandomSharpness((0.5, 0.5))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)

    def test_random_sharpness_randomness(self):
        # NOTE(review): the `[:5]` slice on a size-3 first axis appears to
        # be a no-op leftover; the test works without it.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]

        layer = layers.RandomSharpness(0.2)
        adjusted_images = layer(image)

        self.assertNotAllClose(adjusted_images, image)

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomSharpness(
            factor=0.5, data_format=data_format, seed=1337
        )

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform_test.py | keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomElasticTransformTest(testing.TestCase):
    """Tests for `keras.layers.RandomElasticTransform`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomElasticTransform,
            init_kwargs={
                "factor": 1.0,
                "scale": 0.5,
                "interpolation": "bilinear",
                "fill_mode": "reflect",
                "fill_value": 0,
                "value_range": (0, 255),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
            run_training_check=False,
        )

    def test_random_elastic_transform_inference(self):
        # With training=False the layer must be an identity op.
        seed = 3481
        layer = layers.RandomElasticTransform()

        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_elastic_transform_no_op(self):
        # factor=0 or scale=0 must leave the input untouched.
        seed = 3481
        layer = layers.RandomElasticTransform(factor=0)

        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

        layer = layers.RandomElasticTransform(scale=0)

        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    def test_random_elastic_transform_basic(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.zeros((8, 8, 1))
            inputs[3:5, 3:5, :] = 1.0
        else:
            inputs = np.zeros((1, 8, 8))
            inputs[:, 3:5, 3:5] = 1.0

        layer = layers.RandomElasticTransform(data_format=data_format)

        transformation = {
            "apply_transform": np.array([True]),
            "distortion_factor": np.float32(0.9109325),
            "seed": 42,
        }
        output = layer.transform_images(inputs, transformation)

        self.assertNotAllClose(inputs, output)
        self.assertEqual(inputs.shape, output.shape)

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomElasticTransform(data_format=data_format)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        # Removed leftover debug `print` statements; materialize the output
        # to ensure the mapped dataset actually executes (matches the
        # pattern used by the other preprocessing-layer tests).
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_brightness.py | keras/src/layers/preprocessing/image_preprocessing/random_brightness.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomBrightness")
class RandomBrightness(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly adjusts brightness during training.
This layer will randomly increase/reduce the brightness for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white.
When only one float is provided, eg, 0.2,
then -0.2 will be used for lower bound and 0.2
will be used for upper bound.
value_range: Optional list/tuple of 2 floats
for the lower and upper limit
of the values of the input data.
To make no change, use `[0.0, 1.0]`, e.g., if the image input
has been scaled before this layer. Defaults to `[0.0, 255.0]`.
The brightness adjustment will be scaled to this range, and the
output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats.
The output value will be clipped to the range `[0, 255]`,
the valid range of RGB colors, and
rescaled based on the `value_range` if needed.
Example:
```python
random_bright = keras.layers.RandomBrightness(factor=0.2)
# An image with shape [2, 2, 3]
image = [[[1, 2, 3], [4 ,5 ,6]], [[7, 8, 9], [10, 11, 12]]]
# Assume we randomly select the factor to be 0.1, then it will apply
# 0.1 * 255 to all the channel
output = random_bright(image, training=True)
# output will be int64 with 25.5 added to each channel and round down.
>>> array([[[26.5, 27.5, 28.5]
[29.5, 30.5, 31.5]]
[[32.5, 33.5, 34.5]
[35.5, 36.5, 37.5]]],
shape=(2, 2, 3), dtype=int64)
```
"""
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
    """Initialize the layer: validate `value_range` and set up RNG state.

    Args:
        factor: Brightness factor (float or pair of floats); handled by
            the base class.
        value_range: Two numbers giving the expected input pixel range.
        seed: Optional integer for deterministic random behavior.
        **kwargs: Forwarded to the base preprocessing layer.
    """
    super().__init__(factor=factor, **kwargs)
    # Validate/store the range first; RNG setup is independent of it.
    self._set_value_range(value_range)
    self.seed = seed
    self.generator = SeedGenerator(seed)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def get_random_transformation(self, data, training=True, seed=None):
    """Sample the additive brightness delta for this call.

    Returns:
        A dict with a single key `"rgb_delta"`: an additive offset,
        already scaled by the span of `self.value_range`. When
        `training` is `False` the delta is all zeros (no-op).

    Raises:
        ValueError: If the input image is not rank 3 or 4.
    """
    if isinstance(data, dict):
        images = data["images"]
    else:
        images = data
    images_shape = self.backend.shape(images)
    rank = len(images_shape)
    if rank == 3:
        # Single (H, W, C) image: one scalar delta broadcast over it.
        rgb_delta_shape = (1, 1, 1)
    elif rank == 4:
        # Keep only the batch dim so the adjustment is shared within
        # each image but differs across images in the batch.
        rgb_delta_shape = [images_shape[0], 1, 1, 1]
    else:
        raise ValueError(
            "Expected the input image to be rank 3 or 4. Received "
            f"inputs.shape={images_shape}"
        )
    if not training:
        return {"rgb_delta": self.backend.numpy.zeros(rgb_delta_shape)}
    if seed is None:
        seed = self._get_seed_generator(self.backend._backend)
    rgb_delta = self.backend.random.uniform(
        minval=self.factor[0],
        maxval=self.factor[1],
        shape=rgb_delta_shape,
        seed=seed,
    )
    # Factors live in [-1, 1]; rescale to the magnitude of value_range.
    rgb_delta = rgb_delta * (self.value_range[1] - self.value_range[0])
    return {"rgb_delta": rgb_delta}
def transform_images(self, images, transformation, training=True):
    """Add the sampled brightness delta and clip to `value_range`."""
    if not training:
        # Inference: images pass through unchanged.
        return images
    # Cast the delta to the image dtype before the in-place add.
    rgb_delta = self.backend.cast(transformation["rgb_delta"], images.dtype)
    images += rgb_delta
    low, high = self.value_range
    return self.backend.numpy.clip(images, low, high)
def transform_labels(self, labels, transformation, training=True):
    """Labels are unaffected by a pixel-value-only transformation."""
    return labels
def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
    """Brightness does not move pixels, so boxes pass through untouched."""
    return bounding_boxes
def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
    """Segmentation masks are label maps; brightness leaves them as-is."""
    return segmentation_masks
def compute_output_shape(self, input_shape):
    """The layer is elementwise, so output shape equals input shape."""
    return input_shape
def get_config(self):
    """Return the serialization config, including this layer's fields."""
    config = super().get_config()
    # Layer-specific keys are written after the base config, matching
    # the usual "later keys win" merge semantics.
    config.update(
        {
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
    )
    return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_erasing_test.py | keras/src/layers/preprocessing/image_preprocessing/random_erasing_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomErasingTest(testing.TestCase):
    """Tests for `keras.layers.RandomErasing`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomErasing,
            init_kwargs={
                "factor": 1.0,
                "scale": 0.5,
                "fill_value": 0,
                "value_range": (0, 255),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_erasing_inference(self):
        # With training=False the layer must be a no-op.
        seed = 3481
        layer = layers.RandomErasing()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_erasing_no_op(self):
        # factor=0 disables the augmentation entirely.
        seed = 3481
        layer = layers.RandomErasing(factor=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

        # scale=0 selects a zero-area patch, which is also a no-op.
        layer = layers.RandomErasing(scale=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    def test_random_erasing_basic(self):
        # Drive transform_images directly with a fixed mask so the
        # erased pixel is deterministic.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.ones((2, 2, 1))
            expected_output = np.array([[[[0.0], [1.0]], [[1.0], [1.0]]]])
        else:
            inputs = np.ones((1, 2, 2))
            expected_output = np.array(
                [[[[0.0, 0.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]]]
            )

        layer = layers.RandomErasing(data_format=data_format)

        transformation = {
            "apply_erasing": np.asarray([True]),
            "batch_masks": np.asarray(
                [[[[True], [False]], [[False], [False]]]]
            ),
            "fill_value": 0,
        }

        output = layer.transform_images(inputs, transformation)
        # Removed a leftover debug `print(output)` that polluted test logs.
        self.assertAllClose(expected_output, output)

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomErasing(data_format=data_format)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_zoom_test.py | keras/src/layers/preprocessing/image_preprocessing/random_zoom_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.utils import backend_utils
class RandomZoomTest(testing.TestCase):
    """Tests for `keras.layers.RandomZoom`."""

    @parameterized.named_parameters(
        ("random_zoom_in_4_by_6", -0.4, -0.6),
        ("random_zoom_in_2_by_3", -0.2, -0.3),
        ("random_zoom_in_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
        ("random_zoom_out_4_by_6", 0.4, 0.6),
        ("random_zoom_out_2_by_3", 0.2, 0.3),
        ("random_zoom_out_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
    )
    def test_random_zoom(self, height_factor, width_factor):
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": height_factor,
                "width_factor": width_factor,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=False,
        )

    def test_random_zoom_out_correctness(self):
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 2.7, 4.5, 6.3, 0],
                [0, 10.2, 12.0, 13.8, 0],
                [0, 17.7, 19.5, 21.3, 0],
                [0, 0, 0, 0, 0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, input_shape)
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (0.5, 0.5),
                "width_factor": (0.8, 0.8),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )

    def test_random_zoom_in_correctness(self):
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        expected_output = np.asarray(
            [
                [6.0, 6.5, 7.0, 7.5, 8.0],
                [8.5, 9.0, 9.5, 10.0, 10.5],
                [11.0, 11.5, 12.0, 12.5, 13.0],
                [13.5, 14.0, 14.5, 15.0, 15.5],
                [16.0, 16.5, 17.0, 17.5, 18.0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, input_shape)
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (-0.5, -0.5),
                "width_factor": (-0.5, -0.5),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    def test_tf_data_compatibility(self):
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        layer = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            width_factor=(0.8, 0.8),
            interpolation="nearest",
            fill_mode="constant",
        )
        ds = tf_data.Dataset.from_tensor_slices(input_image).batch(1).map(layer)
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 5, 7, 9, 0],
                [0, 10, 12, 14, 0],
                [0, 20, 22, 24, 0],
                [0, 0, 0, 0, 0],
            ]
        ).reshape(input_shape)
        output = next(iter(ds)).numpy()
        self.assertAllClose(expected_output, output)

    def test_dynamic_shape(self):
        inputs = layers.Input((None, None, 3))
        outputs = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            width_factor=(0.8, 0.8),
            interpolation="nearest",
            fill_mode="constant",
        )(inputs)
        model = models.Model(inputs, outputs)
        model.predict(np.random.random((1, 6, 6, 3)))

    @pytest.mark.skipif(
        backend.backend() == "numpy",
        reason="The NumPy backend does not implement fit.",
    )
    def test_connect_with_flatten(self):
        model = models.Sequential(
            [
                layers.RandomZoom((-0.5, 0.0), (-0.5, 0.0)),
                layers.Flatten(),
                layers.Dense(1, activation="relu"),
            ],
        )
        model.compile(loss="mse")
        model.fit(np.random.random((2, 2, 2, 1)), y=np.random.random((2,)))

    @parameterized.named_parameters(
        (
            "with_zoom_in",
            [[[0.1]], [[0.1]]],
            [[[0.0, 0.0, 8.0, 0.0], [8.0, 0.0, 8.0, 10.0]]],
        ),
        (
            "with_zoom_out",
            [[[1.9]], [[1.9]]],
            [
                [
                    [2.710526, 2.657895, 3.763158, 3.710526],
                    [4.815789, 4.236842, 5.868421, 5.289474],
                ]
            ],
        ),
    )
    # Renamed from `test_random_flip_bounding_boxes`: this suite tests
    # RandomZoom; the old name was a copy-paste from the RandomFlip tests.
    def test_random_zoom_bounding_boxes(self, zoom, expected_boxes):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        random_zoom_layer = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        transformation = {
            "height_zoom": backend_utils.convert_tf_tensor(np.array(zoom[0])),
            "width_zoom": backend_utils.convert_tf_tensor(np.array(zoom[1])),
            "input_shape": image_shape,
        }
        output = random_zoom_layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation=transformation,
            training=True,
        )

        self.assertAllClose(output["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "with_zoom_in",
            [[[0.1]], [[0.1]]],
            [[[0.0, 0.0, 8.0, 0.0], [8.0, 0.0, 8.0, 10.0]]],
        ),
        (
            "with_zoom_out",
            [[[1.9]], [[1.9]]],
            [
                [
                    [2.710526, 2.657895, 3.763158, 3.710526],
                    [4.815789, 4.236842, 5.868421, 5.289474],
                ]
            ],
        ),
    )
    # Renamed from `test_random_flip_tf_data_bounding_boxes` for the same
    # reason as above.
    def test_random_zoom_tf_data_bounding_boxes(self, zoom, expected_boxes):
        data_format = backend.config.image_data_format()
        # Reuse the already-computed data_format instead of querying the
        # backend config a second time.
        if data_format == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}

        ds = tf_data.Dataset.from_tensor_slices(input_data)
        random_zoom_layer = layers.RandomZoom(
            height_factor=0.5,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        transformation = {
            "height_zoom": np.array(zoom[0]),
            "width_zoom": np.array(zoom[1]),
            "input_shape": image_shape,
        }
        ds = ds.map(
            lambda x: random_zoom_layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )

        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/__init__.py | keras/src/layers/preprocessing/image_preprocessing/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_shear.py | keras/src/layers/preprocessing/image_preprocessing/random_shear.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandomShear")
class RandomShear(BaseImagePreprocessingLayer):
    """A preprocessing layer that randomly applies shear transformations to
    images.

    This layer shears the input images along the x-axis and/or y-axis by a
    randomly selected factor within the specified range. The shear
    transformation is applied to each image independently in a batch. Empty
    regions created during the transformation are filled according to the
    `fill_mode` and `fill_value` parameters.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        x_factor: A tuple of two floats. For each augmented image, a value
            is sampled from the provided range. If a float is passed, the
            range is interpreted as `(0, x_factor)`. Values represent a
            percentage of the image to shear over. For example, 0.3 shears
            pixels up to 30% of the way across the image. All provided values
            should be positive.
        y_factor: A tuple of two floats. For each augmented image, a value
            is sampled from the provided range. If a float is passed, the
            range is interpreted as `(0, y_factor)`. Values represent a
            percentage of the image to shear over. For example, 0.3 shears
            pixels up to 30% of the way across the image. All provided values
            should be positive.
        interpolation: Interpolation mode. Supported values: `"nearest"`,
            `"bilinear"`.
        fill_mode: Points outside the boundaries of the input are filled
            according to the given mode. Available methods are `"constant"`,
            `"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
            - `"reflect"`: `(d c b a | a b c d | d c b a)`
                The input is extended by reflecting about the edge of the
                last pixel.
            - `"constant"`: `(k k k k | a b c d | k k k k)`
                The input is extended by filling all values beyond the edge
                with the same constant value `k` specified by `fill_value`.
            - `"wrap"`: `(a b c d | a b c d | a b c d)`
                The input is extended by wrapping around to the opposite edge.
            - `"nearest"`: `(a a a a | a b c d | d d d d)`
                The input is extended by the nearest pixel.
            Note that when using torch backend, `"reflect"` is redirected to
            `"mirror"` `(c d c b | a b c d | c b a b)` because torch does
            not support `"reflect"`.
            Note that torch backend does not support `"wrap"`.
        fill_value: A float representing the value to be filled outside the
            boundaries when `fill_mode="constant"`.
        seed: Integer. Used to create a random seed.
    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)
    _FACTOR_VALIDATION_ERROR = (
        "The `factor` argument should be a number (or a list of two numbers) "
        "in the range [0, 1.0]. "
    )
    _SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
    _SUPPORTED_INTERPOLATION = ("nearest", "bilinear")

    def __init__(
        self,
        x_factor=0.0,
        y_factor=0.0,
        interpolation="bilinear",
        fill_mode="reflect",
        fill_value=0.0,
        data_format=None,
        seed=None,
        **kwargs,
    ):
        """Validate factors/modes and set up RNG state."""
        super().__init__(data_format=data_format, **kwargs)
        self.x_factor = self._set_factor_with_name(x_factor, "x_factor")
        self.y_factor = self._set_factor_with_name(y_factor, "y_factor")

        if fill_mode not in self._SUPPORTED_FILL_MODE:
            raise NotImplementedError(
                f"Unknown `fill_mode` {fill_mode}. Expected of one "
                f"{self._SUPPORTED_FILL_MODE}."
            )
        if interpolation not in self._SUPPORTED_INTERPOLATION:
            raise NotImplementedError(
                f"Unknown `interpolation` {interpolation}. Expected of one "
                f"{self._SUPPORTED_INTERPOLATION}."
            )

        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # Affine transforms are not jittable across all backends.
        self.supports_jit = False

    def _set_factor_with_name(self, factor, factor_name):
        """Normalize `factor` to a validated `(lower, upper)` tuple."""
        if isinstance(factor, (tuple, list)):
            if len(factor) != 2:
                raise ValueError(
                    self._FACTOR_VALIDATION_ERROR
                    + f"Received: {factor_name}={factor}"
                )
            self._check_factor_range(factor[0])
            self._check_factor_range(factor[1])
            lower, upper = sorted(factor)
        elif isinstance(factor, (int, float)):
            self._check_factor_range(factor)
            factor = abs(factor)
            # NOTE(review): a scalar yields a symmetric (-factor, factor)
            # range, while the class docstring describes (0, factor) —
            # confirm which is intended.
            lower, upper = [-factor, factor]
        else:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: {factor_name}={factor}"
            )
        return lower, upper

    def _check_factor_range(self, input_number):
        """Raise ValueError unless `input_number` is within [0, 1]."""
        if input_number > 1.0 or input_number < 0.0:
            raise ValueError(
                self._FACTOR_VALIDATION_ERROR
                + f"Received: input_number={input_number}"
            )

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample per-image (x, y) shear factors with a random sign.

        Returns:
            `None` at inference time; otherwise a dict with
            `"shear_factor"` of shape (batch, 2) and `"input_shape"`.
        """
        if not training:
            return None

        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data

        images_shape = self.backend.shape(images)
        if len(images_shape) == 3:
            batch_size = 1
        else:
            batch_size = images_shape[0]

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        # Random +/-1 per image: flips the shear direction with p=0.5.
        invert = self.backend.random.uniform(
            minval=0,
            maxval=1,
            shape=[batch_size, 1],
            seed=seed,
            dtype=self.compute_dtype,
        )
        invert = self.backend.numpy.where(
            invert > 0.5,
            -self.backend.numpy.ones_like(invert),
            self.backend.numpy.ones_like(invert),
        )

        shear_y = self.backend.random.uniform(
            minval=self.y_factor[0],
            maxval=self.y_factor[1],
            shape=[batch_size, 1],
            seed=seed,
            dtype=self.compute_dtype,
        )
        shear_x = self.backend.random.uniform(
            minval=self.x_factor[0],
            maxval=self.x_factor[1],
            shape=[batch_size, 1],
            seed=seed,
            dtype=self.compute_dtype,
        )

        shear_factor = (
            self.backend.cast(
                self.backend.numpy.concatenate([shear_x, shear_y], axis=1),
                dtype=self.compute_dtype,
            )
            * invert
        )
        return {"shear_factor": shear_factor, "input_shape": images_shape}

    def transform_images(self, images, transformation, training=True):
        """Apply the sampled shear to the images (no-op at inference)."""
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            return self._shear_inputs(images, transformation)
        return images

    def _shear_inputs(self, inputs, transformation):
        """Affine-shear a (possibly unbatched) image tensor."""
        if transformation is None:
            return inputs

        inputs_shape = self.backend.shape(inputs)
        unbatched = len(inputs_shape) == 3
        if unbatched:
            # affine_transform expects a batch dim; add and remove it.
            inputs = self.backend.numpy.expand_dims(inputs, axis=0)

        shear_factor = transformation["shear_factor"]
        outputs = self.backend.image.affine_transform(
            inputs,
            transform=self._get_shear_matrix(shear_factor),
            interpolation=self.interpolation,
            fill_mode=self.fill_mode,
            fill_value=self.fill_value,
            data_format=self.data_format,
        )

        if unbatched:
            outputs = self.backend.numpy.squeeze(outputs, axis=0)
        return outputs

    def _get_shear_matrix(self, shear_factors):
        """Build the flattened 8-element affine transform per image."""
        num_shear_factors = self.backend.shape(shear_factors)[0]

        # The shear matrix looks like:
        # [[1 s_x 0]
        #  [s_y 1 0]
        #  [0 0 1]]
        # affine_transform consumes it as the flattened first 8 entries.
        return self.backend.numpy.stack(
            [
                self.backend.numpy.ones((num_shear_factors,)),
                shear_factors[:, 0],
                self.backend.numpy.zeros((num_shear_factors,)),
                shear_factors[:, 1],
                self.backend.numpy.ones((num_shear_factors,)),
                self.backend.numpy.zeros((num_shear_factors,)),
                self.backend.numpy.zeros((num_shear_factors,)),
                self.backend.numpy.zeros((num_shear_factors,)),
            ],
            axis=1,
        )

    def transform_labels(self, labels, transformation, training=True):
        """Shear moves pixels, not class labels; pass through unchanged."""
        return labels

    def get_transformed_x_y(self, x, y, transform):
        """Apply a flattened projective transform to (x, y) coordinates."""
        a0, a1, a2, b0, b1, b2, c0, c1 = self.backend.numpy.split(
            transform, 8, axis=-1
        )

        # k is the projective normalization term; 1 for pure affine maps.
        k = c0 * x + c1 * y + 1
        x_transformed = (a0 * x + a1 * y + a2) / k
        y_transformed = (b0 * x + b1 * y + b2) / k
        return x_transformed, y_transformed

    def get_shifted_bbox(self, bounding_boxes, w_shift_factor, h_shift_factor):
        """Translate xyxy boxes by the given width/height shifts."""
        bboxes = bounding_boxes["boxes"]
        x1, x2, x3, x4 = self.backend.numpy.split(bboxes, 4, axis=-1)

        w_shift_factor = self.backend.convert_to_tensor(
            w_shift_factor, dtype=x1.dtype
        )
        h_shift_factor = self.backend.convert_to_tensor(
            h_shift_factor, dtype=x1.dtype
        )

        if len(bboxes.shape) == 3:
            w_shift_factor = self.backend.numpy.expand_dims(w_shift_factor, -1)
            h_shift_factor = self.backend.numpy.expand_dims(h_shift_factor, -1)

        bounding_boxes["boxes"] = self.backend.numpy.concatenate(
            [
                x1 - w_shift_factor,
                x2 - h_shift_factor,
                x3 - w_shift_factor,
                x4 - h_shift_factor,
            ],
            axis=-1,
        )
        return bounding_boxes

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Shear bounding boxes to follow the image transformation."""

        def _get_height_width(transformation):
            # Resolve (H, W) from the stored input shape for either
            # data format.
            if self.data_format == "channels_first":
                height_axis = -2
                width_axis = -1
            else:
                height_axis = -3
                width_axis = -2
            input_height, input_width = (
                transformation["input_shape"][height_axis],
                transformation["input_shape"][width_axis],
            )
            return input_height, input_width

        if training:
            if backend_utils.in_tf_graph():
                # Inside a tf.data pipeline all box math must run on the
                # TensorFlow backend regardless of the active one.
                self.backend.set_backend("tensorflow")

            input_height, input_width = _get_height_width(transformation)

            # Work in relative xyxy so the shear is resolution-independent.
            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="rel_xyxy",
                height=input_height,
                width=input_width,
                dtype=self.compute_dtype,
            )

            bounding_boxes = self._shear_bboxes(bounding_boxes, transformation)

            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=input_height,
                width=input_width,
                bounding_box_format="rel_xyxy",
            )

            bounding_boxes = convert_format(
                bounding_boxes,
                source="rel_xyxy",
                target=self.bounding_box_format,
                height=input_height,
                width=input_width,
                dtype=self.compute_dtype,
            )

            self.backend.reset()

        return bounding_boxes

    def _shear_bboxes(self, bounding_boxes, transformation):
        """Shear rel_xyxy boxes by the sampled (x, y) shear amounts."""
        shear_factor = self.backend.cast(
            transformation["shear_factor"], dtype=self.compute_dtype
        )
        shear_x_amount, shear_y_amount = self.backend.numpy.split(
            shear_factor, 2, axis=-1
        )

        x1, y1, x2, y2 = self.backend.numpy.split(
            bounding_boxes["boxes"], 4, axis=-1
        )
        x1 = self.backend.numpy.squeeze(x1, axis=-1)
        y1 = self.backend.numpy.squeeze(y1, axis=-1)
        x2 = self.backend.numpy.squeeze(x2, axis=-1)
        y2 = self.backend.numpy.squeeze(y2, axis=-1)

        # For each axis, pick the corner that yields the extreme
        # coordinate after shearing, depending on the shear sign.
        if shear_x_amount is not None:
            x1_top = x1 - (shear_x_amount * y1)
            x1_bottom = x1 - (shear_x_amount * y2)
            x1 = self.backend.numpy.where(shear_x_amount < 0, x1_top, x1_bottom)

            x2_top = x2 - (shear_x_amount * y1)
            x2_bottom = x2 - (shear_x_amount * y2)
            x2 = self.backend.numpy.where(shear_x_amount < 0, x2_bottom, x2_top)

        if shear_y_amount is not None:
            y1_left = y1 - (shear_y_amount * x1)
            y1_right = y1 - (shear_y_amount * x2)
            y1 = self.backend.numpy.where(shear_y_amount > 0, y1_right, y1_left)

            y2_left = y2 - (shear_y_amount * x1)
            y2_right = y2 - (shear_y_amount * x2)
            y2 = self.backend.numpy.where(shear_y_amount > 0, y2_left, y2_right)

        boxes = self.backend.numpy.concatenate(
            [
                self.backend.numpy.expand_dims(x1, axis=-1),
                self.backend.numpy.expand_dims(y1, axis=-1),
                self.backend.numpy.expand_dims(x2, axis=-1),
                self.backend.numpy.expand_dims(y2, axis=-1),
            ],
            axis=-1,
        )
        bounding_boxes["boxes"] = boxes

        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Masks follow the pixels, so apply the same image transform."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def get_config(self):
        """Return the serialization config for this layer."""
        base_config = super().get_config()
        config = {
            "x_factor": self.x_factor,
            "y_factor": self.y_factor,
            "fill_mode": self.fill_mode,
            "interpolation": self.interpolation,
            "seed": self.seed,
            "fill_value": self.fill_value,
            "data_format": self.data_format,
        }
        return {**base_config, **config}

    def compute_output_shape(self, input_shape):
        """Shear preserves the tensor shape."""
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_brightness_test.py | keras/src/layers/preprocessing/image_preprocessing/random_brightness_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomBrightnessTest(testing.TestCase):
    """Tests for `keras.layers.RandomBrightness`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomBrightness,
            init_kwargs={
                "factor": 0.75,
                "value_range": (20, 200),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_brightness_inference(self):
        # With training=False the layer must be a no-op.
        seed = 3481
        layer = layers.RandomBrightness([0, 1.0])
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_correctness(self):
        seed = 2390

        # Always scale up, but randomly between 0 ~ 255
        layer = layers.RandomBrightness([0.1, 1.0])
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = backend.convert_to_numpy(layer(inputs))
        # `output` is already a NumPy array, so `diff` is too; the
        # redundant second convert_to_numpy call was removed (the
        # scale-down branch below never had it).
        diff = output - inputs
        self.assertTrue(np.amin(diff) >= 0)
        self.assertTrue(np.mean(diff) > 0)

        # Always scale down, but randomly between 0 ~ 255
        layer = layers.RandomBrightness([-1.0, -0.1])
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = backend.convert_to_numpy(layer(inputs))
        diff = output - inputs
        self.assertTrue(np.amax(diff) <= 0)
        self.assertTrue(np.mean(diff) < 0)

    def test_tf_data_compatibility(self):
        layer = layers.RandomBrightness(factor=0.5, seed=1337)
        input_data = np.random.random((2, 8, 8, 3))
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()

    def test_value_range_incorrect_type(self):
        with self.assertRaisesRegex(
            ValueError,
            "The `value_range` argument should be a list of two numbers.*",
        ):
            layers.RandomBrightness(factor=0.1, value_range="incorrect_type")

    def test_value_range_incorrect_length(self):
        with self.assertRaisesRegex(
            ValueError,
            "The `value_range` argument should be a list of two numbers.*",
        ):
            layers.RandomBrightness(factor=0.1, value_range=[10])

    def test_set_factor_incorrect_length(self):
        layer = layers.RandomBrightness(factor=0.5)
        with self.assertRaisesRegex(
            ValueError, "The `factor` argument should be a number.*"
        ):
            layer._set_factor([0.1])  # Only one element in list

    def test_set_factor_incorrect_type(self):
        layer = layers.RandomBrightness(factor=0.5)
        with self.assertRaisesRegex(
            ValueError, "The `factor` argument should be a number.*"
        ):
            layer._set_factor(
                "invalid_type"
            )  # Passing a string instead of a number or a list/tuple of numbers

    def test_factor_range_below_lower_bound(self):
        with self.assertRaisesRegex(
            ValueError, "The `factor` argument should be a number.*"
        ):
            # Passing a value less than -1.0
            layers.RandomBrightness(factor=-1.1)

    def test_factor_range_above_upper_bound(self):
        with self.assertRaisesRegex(
            ValueError, "The `factor` argument should be a number.*"
        ):
            # Passing a value more than 1.0
            layers.RandomBrightness(factor=1.1)

    def test_randomly_adjust_brightness_input_incorrect_rank(self):
        layer = layers.RandomBrightness(factor=0.1)
        wrong_rank_input = np.random.rand(10, 10)
        with self.assertRaisesRegex(
            ValueError,
            "Expected the input image to be rank 3 or 4.",
        ):
            layer(
                wrong_rank_input, training=True
            )  # Call the method that triggers the error

    def test_dict_input(self):
        layer = layers.RandomBrightness(factor=0.1, bounding_box_format="xyxy")
        data = {
            "images": np.random.random((2, 4, 5, 3)),
            "labels": np.random.random((2, 7)),
            "segmentation_masks": np.random.random((2, 4, 5, 7)),
            "bounding_boxes": {
                "boxes": np.array([[1, 2, 2, 3]]),
                "labels": np.array([0]),
            },
        }
        transformed_data = layer(data)
        self.assertEqual(
            data["images"].shape[:-1],
            transformed_data["segmentation_masks"].shape[:-1],
        )
        self.assertAllClose(data["labels"], transformed_data["labels"])
        self.assertAllClose(
            data["bounding_boxes"]["boxes"],
            transformed_data["bounding_boxes"]["boxes"],
        )
        self.assertAllClose(
            data["bounding_boxes"]["labels"],
            transformed_data["bounding_boxes"]["labels"],
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_saturation.py | keras/src/layers/preprocessing/image_preprocessing/random_saturation.py | from keras.src.api_export import keras_export
from keras.src.backend import epsilon
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.RandomSaturation")
class RandomSaturation(BaseImagePreprocessingLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A tuple of two floats or a single float.
`factor` controls the extent to which the image saturation
is impacted. `factor=0.5` makes this layer perform a no-op
operation. `factor=0.0` makes the image fully grayscale.
`factor=1.0` makes the image fully saturated. Values should
be between `0.0` and `1.0`. If a tuple is used, a `factor`
is sampled between the two values for every image augmented.
If a single float is used, a value between `0.0` and the passed
float is sampled. To ensure the value is always the same,
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
Example:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
images = images.astype("float32")
random_saturation = keras.layers.RandomSaturation(factor=0.2)
augmented_images = random_saturation(images)
```
"""
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
factor,
value_range=(0, 255),
data_format=None,
seed=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self._set_value_range(value_range)
self.seed = seed
self.generator = SeedGenerator(seed)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
batch_size = 1
elif rank == 4:
batch_size = images_shape[0]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received: "
f"inputs.shape={images_shape}"
)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
factor = self.backend.random.uniform(
(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
factor = factor / (1 - factor + epsilon())
return {"factor": factor}
def transform_images(self, images, transformation=None, training=True):
if training:
adjust_factors = transformation["factor"]
adjust_factors = self.backend.cast(
adjust_factors, self.compute_dtype
)
adjust_factors = self.backend.numpy.reshape(
adjust_factors, self.backend.shape(adjust_factors) + (1, 1)
)
images = self.backend.image.rgb_to_hsv(
images, data_format=self.data_format
)
if self.data_format == "channels_first":
s_channel = self.backend.numpy.multiply(
images[:, 1, :, :], adjust_factors
)
s_channel = self.backend.numpy.clip(
s_channel, self.value_range[0], self.value_range[1]
)
images = self.backend.numpy.stack(
[images[:, 0, :, :], s_channel, images[:, 2, :, :]], axis=1
)
else:
s_channel = self.backend.numpy.multiply(
images[..., 1], adjust_factors
)
s_channel = self.backend.numpy.clip(
s_channel, self.value_range[0], self.value_range[1]
)
images = self.backend.numpy.stack(
[images[..., 0], s_channel, images[..., 2]], axis=-1
)
images = self.backend.image.hsv_to_rgb(
images, data_format=self.data_format
)
return images
    def transform_labels(self, labels, transformation, training=True):
        # A per-pixel color adjustment does not affect labels.
        return labels
    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Color changes leave segmentation masks untouched.
        return segmentation_masks
    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        # Geometry is unchanged, so bounding boxes pass through as-is.
        return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
    def compute_output_shape(self, input_shape):
        # Color adjustment preserves the input shape exactly.
        return input_shape
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.backend import convert_to_tensor
class MixUpTest(testing.TestCase):
    """Unit tests for `keras.layers.MixUp`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Generic layer-contract checks (serialization, shapes, masking).
        self.run_layer_test(
            layers.MixUp,
            init_kwargs={
                "alpha": 0.2,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
            # StatelessRandomGammaV3 is not supported on XLA_GPU_JIT
            run_training_check=not testing.tensorflow_uses_gpu(),
        )

    def test_mix_up_inference(self):
        # With training=False the layer must act as the identity.
        seed = 3481
        layer = layers.MixUp(alpha=0.2)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_mix_up_basic_functionality(self):
        # mix_weight == 1 with an identity permutation is a no-op.
        image = np.random.random((64, 64, 3))
        mix_up_layer = layers.MixUp(alpha=1)
        transformation = {"mix_weight": 1, "permutation_order": [0]}
        output = mix_up_layer.transform_images(
            image, transformation=transformation
        )[0]
        self.assertAllClose(output, image)
        # A non-trivial weight/permutation must change pixel values but
        # preserve the batch shape.
        image = np.random.random((4, 64, 64, 3))
        mix_up_layer = layers.MixUp(alpha=0.2)
        transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
        output = mix_up_layer.transform_images(
            image, transformation=transformation
        )
        self.assertNotAllClose(output, image)
        self.assertAllClose(output.shape, image.shape)

    def test_mix_up_basic_functionality_channel_first(self):
        # Same checks as above, but with channels-first image layout.
        image = np.random.random((3, 64, 64))
        mix_up_layer = layers.MixUp(alpha=1)
        transformation = {"mix_weight": 1, "permutation_order": [0]}
        output = mix_up_layer.transform_images(
            image, transformation=transformation
        )[0]
        self.assertAllClose(output, image)
        image = np.random.random((4, 3, 64, 64))
        mix_up_layer = layers.MixUp(alpha=0.2)
        transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
        output = mix_up_layer.transform_images(
            image, transformation=transformation
        )
        self.assertNotAllClose(output, image)
        self.assertAllClose(output.shape, image.shape)

    def test_tf_data_compatibility(self):
        # The layer must be mappable inside a tf.data pipeline.
        layer = layers.MixUp()
        input_data = np.random.random((2, 8, 8, 3))
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()

    def test_mix_up_bounding_boxes(self):
        # Swapping two images with weight 0.5 should concatenate each
        # image's boxes with its partner's boxes.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([1, 2]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        expected_boxes = [[2, 1, 4, 3, 6, 4, 8, 6], [6, 4, 8, 6, 2, 1, 4, 3]]
        # NOTE(review): variable name says "flip" but this is a MixUp
        # layer — likely a copy-paste from the RandomFlip tests.
        random_flip_layer = layers.MixUp(
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "mix_weight": convert_to_tensor([0.5, 0.5]),
            "permutation_order": convert_to_tensor([1, 0]),
        }
        output = random_flip_layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation=transformation,
            training=True,
        )
        self.assertAllClose(output["boxes"], expected_boxes)

    def test_mix_up_tf_data_bounding_boxes(self):
        # Same bounding-box semantics, but driven through tf.data.map.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        expected_boxes = [[2, 1, 4, 3, 6, 4, 8, 6], [6, 4, 8, 6, 2, 1, 4, 3]]
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        layer = layers.MixUp(
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )
        transformation = {
            "mix_weight": convert_to_tensor([0.5, 0.5]),
            "permutation_order": convert_to_tensor([1, 0]),
        }
        ds = ds.map(
            lambda x: layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )
        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["boxes"], expected_boxes)
import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomHueTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomHue`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Generic layer-contract checks (serialization, shapes, masking).
        self.run_layer_test(
            layers.RandomHue,
            init_kwargs={
                "factor": 0.75,
                "value_range": (20, 200),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_hue_inference(self):
        # With training=False the layer must act as the identity.
        seed = 3481
        layer = layers.RandomHue(0.2, [0, 1.0])
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_hue_value_range_0_to_1(self):
        # Output stays within the declared [0, 1] value range.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomHue(0.2, (0, 1))
        adjusted_image = layer(image)
        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))

    def test_random_hue_value_range_0_to_255(self):
        # Output stays within the declared [0, 255] value range.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=255)
        layer = layers.RandomHue(0.2, (0, 255))
        adjusted_image = layer(image)
        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 255))

    def test_random_hue_no_change_with_zero_factor(self):
        # factor=0 must leave images (numerically) unchanged.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = keras.random.randint((224, 224, 3), 0, 255)
        else:
            inputs = keras.random.randint((3, 224, 224), 0, 255)
        layer = layers.RandomHue(0, (0, 255), data_format=data_format)
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)

    def test_random_hue_randomness(self):
        # A non-zero factor should actually perturb the image.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
        layer = layers.RandomHue(0.2, (0, 255))
        adjusted_images = layer(image)
        self.assertNotAllClose(adjusted_images, image)

    def test_tf_data_compatibility(self):
        # The layer must be mappable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomHue(
            factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
        )
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.RandomErasing")
class RandomErasing(BaseImagePreprocessingLayer):
    """Random Erasing data augmentation technique.

    Random Erasing is a data augmentation method where random patches of
    an image are erased (replaced by a constant value or noise)
    during training to improve generalization.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    References:
       - [Random Erasing paper](https://arxiv.org/abs/1708.04896).

    Args:
        factor: A single float or a tuple of two floats.
            `factor` controls the probability of applying the transformation.
            - `factor=0.0` ensures no erasing is applied.
            - `factor=1.0` means erasing is always applied.
            - If a tuple `(min, max)` is provided, a probability value
              is sampled between `min` and `max` for each image.
            - If a single float is provided, a probability is sampled
              between `0.0` and the given float.
            Default is 1.0.
        scale: A tuple of two floats representing the aspect ratio range of
            the erased patch. This defines the width-to-height ratio of
            the patch to be erased. It can help control the shape of
            the erased region. Default is (0.02, 0.33).
        fill_value: A value to fill the erased region with. This can be set to
            a constant value or `None` to sample a random value
            from a normal distribution. Default is `None`.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`. This is
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)

    def __init__(
        self,
        factor=1.0,
        scale=(0.02, 0.33),
        fill_value=None,
        value_range=(0, 255),
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self.scale = self._set_factor_by_name(scale, "scale")
        self.fill_value = fill_value
        self.value_range = value_range
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # Negative axis indices work for both rank-3 and rank-4 inputs.
        if self.data_format == "channels_first":
            self.height_axis = -2
            self.width_axis = -1
            self.channel_axis = -3
        else:
            self.height_axis = -3
            self.width_axis = -2
            self.channel_axis = -1

    def _set_factor_by_name(self, factor, name):
        """Validate a scalar-or-pair factor against `_FACTOR_BOUNDS`.

        Returns a `(lower, upper)` tuple; a scalar `f` becomes
        `(max(-f, lower_bound), f)`.
        """
        error_msg = (
            f"The `{name}` argument should be a number "
            "(or a list of two numbers) "
            "in the range "
            f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
            f"Received: factor={factor}"
        )
        if isinstance(factor, (tuple, list)):
            if len(factor) != 2:
                raise ValueError(error_msg)
            if (
                factor[0] > self._FACTOR_BOUNDS[1]
                or factor[1] < self._FACTOR_BOUNDS[0]
            ):
                raise ValueError(error_msg)
            lower, upper = sorted(factor)
        elif isinstance(factor, (int, float)):
            if (
                factor < self._FACTOR_BOUNDS[0]
                or factor > self._FACTOR_BOUNDS[1]
            ):
                raise ValueError(error_msg)
            factor = abs(factor)
            lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
        else:
            raise ValueError(error_msg)
        return lower, upper

    def _compute_crop_bounds(self, batch_size, image_length, crop_ratio, seed):
        """Sample per-image `(start, end)` positions for a patch of size
        `crop_ratio * image_length` along one spatial dimension."""
        crop_length = self.backend.cast(
            crop_ratio * image_length, dtype=self.compute_dtype
        )
        # Uniform in [0, image_length - crop_length] so the patch fits.
        start_pos = self.backend.random.uniform(
            shape=[batch_size],
            minval=0,
            maxval=1,
            dtype=self.compute_dtype,
            seed=seed,
        ) * (image_length - crop_length)
        end_pos = start_pos + crop_length
        return start_pos, end_pos

    def _generate_batch_mask(self, images_shape, box_corners):
        """Build a boolean mask that is True inside each image's erase box.

        `box_corners` is `(x0, x1, y0, y1)`, each of shape `(batch,)`.
        The mask is broadcast over all channels.
        """

        def _generate_grid_xy(image_height, image_width):
            # Pixel-coordinate grids, reshaped so they broadcast against
            # per-image box corners in the layer's data format.
            grid_y, grid_x = self.backend.numpy.meshgrid(
                self.backend.numpy.arange(
                    image_height, dtype=self.compute_dtype
                ),
                self.backend.numpy.arange(
                    image_width, dtype=self.compute_dtype
                ),
                indexing="ij",
            )
            if self.data_format == "channels_last":
                grid_y = self.backend.cast(
                    grid_y[None, :, :, None], dtype=self.compute_dtype
                )
                grid_x = self.backend.cast(
                    grid_x[None, :, :, None], dtype=self.compute_dtype
                )
            else:
                grid_y = self.backend.cast(
                    grid_y[None, None, :, :], dtype=self.compute_dtype
                )
                grid_x = self.backend.cast(
                    grid_x[None, None, :, :], dtype=self.compute_dtype
                )
            return grid_x, grid_y

        image_height, image_width = (
            images_shape[self.height_axis],
            images_shape[self.width_axis],
        )

        grid_x, grid_y = _generate_grid_xy(image_height, image_width)

        x0, x1, y0, y1 = box_corners

        # Expand (batch,) -> (batch, 1, 1, 1) for broadcasting.
        x0 = x0[:, None, None, None]
        y0 = y0[:, None, None, None]
        x1 = x1[:, None, None, None]
        y1 = y1[:, None, None, None]

        batch_masks = (
            (grid_x >= x0) & (grid_x < x1) & (grid_y >= y0) & (grid_y < y1)
        )
        batch_masks = self.backend.numpy.repeat(
            batch_masks, images_shape[self.channel_axis], axis=self.channel_axis
        )

        return batch_masks

    def _get_fill_value(self, images, images_shape, seed):
        """Return the fill tensor for erased pixels.

        If `self.fill_value` is None, samples Gaussian noise scaled by
        the upper end of `value_range`; otherwise broadcasts the given
        constant(s). The result is clipped to `value_range`.
        """
        fill_value = self.fill_value
        if fill_value is None:
            fill_value = (
                self.backend.random.normal(
                    images_shape,
                    dtype=self.compute_dtype,
                    seed=seed,
                )
                * self.value_range[1]
            )
        else:
            error_msg = (
                "The `fill_value` argument should be a number "
                "(or a list of three numbers) "
            )
            if isinstance(fill_value, (tuple, list)):
                if len(fill_value) != 3:
                    raise ValueError(error_msg)
                # NOTE(review): relies on the backend's `full_like`
                # broadcasting a length-3 fill over the channel axis —
                # confirm this holds for all backends.
                fill_value = self.backend.numpy.full_like(
                    images, fill_value, dtype=self.compute_dtype
                )
            elif isinstance(fill_value, (int, float)):
                fill_value = (
                    self.backend.numpy.ones(
                        images_shape, dtype=self.compute_dtype
                    )
                    * fill_value
                )
            else:
                raise ValueError(error_msg)
        fill_value = self.backend.numpy.clip(
            fill_value, self.value_range[0], self.value_range[1]
        )
        return fill_value

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample the erase decision, patch mask, and fill values.

        Returns None when not training; otherwise a dict with keys
        `"apply_erasing"` (bool per image), `"batch_masks"`, and
        `"fill_value"`.
        """
        if not training:
            return None

        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data

        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )

        image_height = images_shape[self.height_axis]
        image_width = images_shape[self.width_axis]

        # NOTE(review): the same `seed` object is passed to every random
        # call below; whether the draws are independent depends on the
        # backend's seed-generator semantics — confirm.
        seed = seed or self._get_seed_generator(self.backend._backend)

        # Sample per-image (w, h) scale fractions; sqrt keeps the erased
        # area roughly proportional to the sampled scale.
        mix_weight = self.backend.random.uniform(
            shape=(batch_size, 2),
            minval=self.scale[0],
            maxval=self.scale[1],
            dtype=self.compute_dtype,
            seed=seed,
        )

        mix_weight = self.backend.numpy.sqrt(mix_weight)

        x0, x1 = self._compute_crop_bounds(
            batch_size, image_width, mix_weight[:, 0], seed
        )
        y0, y1 = self._compute_crop_bounds(
            batch_size, image_height, mix_weight[:, 1], seed
        )

        batch_masks = self._generate_batch_mask(
            images_shape,
            (x0, x1, y0, y1),
        )

        # Per-image Bernoulli trial: erase when a uniform draw falls
        # below the (sampled) application probability.
        erase_probability = self.backend.random.uniform(
            shape=(batch_size,),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )

        random_threshold = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0.0,
            maxval=1.0,
            seed=seed,
        )
        apply_erasing = random_threshold < erase_probability

        fill_value = self._get_fill_value(images, images_shape, seed)

        return {
            "apply_erasing": apply_erasing,
            "batch_masks": batch_masks,
            "fill_value": fill_value,
        }

    def transform_images(self, images, transformation=None, training=True):
        """Replace masked pixels with the fill value for selected images."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            batch_masks = transformation["batch_masks"]
            apply_erasing = transformation["apply_erasing"]
            fill_value = transformation["fill_value"]

            erased_images = self.backend.numpy.where(
                batch_masks,
                fill_value,
                images,
            )

            # NOTE(review): the (batch, 1, 1, 1) expansion assumes rank-4
            # images; rank-3 inputs rely on broadcasting adding a leading
            # batch dimension — confirm the unbatched output shape.
            images = self.backend.numpy.where(
                apply_erasing[:, None, None, None],
                erased_images,
                images,
            )

        images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Erasing pixels does not change labels.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        # Geometry is unchanged, so bounding boxes pass through as-is.
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are left untouched (only image pixels are erased).
        return segmentation_masks

    def compute_output_shape(self, input_shape):
        # Erasing preserves the input shape.
        return input_shape

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {
            "factor": self.factor,
            "scale": self.scale,
            "fill_value": self.fill_value,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import ( # noqa: E501
densify_bounding_boxes,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomCrop")
class RandomCrop(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly crops images during training.

    During training, this layer will randomly choose a location to crop images
    down to a target size. The layer will crop all the images in the same batch
    to the same cropping location.

    At inference time, and during training if an input image is smaller than the
    target size, the input will be resized and cropped so as to return the
    largest possible window in the image that matches the target aspect ratio.
    If you need to apply random cropping at inference time, set `training` to
    True when calling the layer.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    of integer or floating point dtype. By default, the layer will output
    floats.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`.

    Args:
        height: Integer, the height of the output shape.
        width: Integer, the width of the output shape.
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
    """

    def __init__(
        self, height, width, seed=None, data_format=None, name=None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self.height = height
        self.width = width
        self.seed = (
            seed if seed is not None else backend.random.make_default_seed()
        )
        self.generator = SeedGenerator(seed)
        self.data_format = backend.standardize_data_format(data_format)

        # Negative axis indices work for both rank-3 and rank-4 inputs.
        if self.data_format == "channels_first":
            self.height_axis = -2
            self.width_axis = -1
        elif self.data_format == "channels_last":
            self.height_axis = -3
            self.width_axis = -2

        self.supports_masking = False
        self.supports_jit = False
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample the crop offsets.

        Returns:
            A `(h_start, w_start)` pair: the top (vertical) and left
            (horizontal) offsets of the crop window. During training with
            a large-enough image these are sampled uniformly; otherwise a
            deterministic center crop of the target aspect ratio is used.
        """
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        if isinstance(data, dict):
            input_shape = self.backend.shape(data["images"])
        else:
            input_shape = self.backend.shape(data)
        input_height, input_width = (
            input_shape[self.height_axis],
            input_shape[self.width_axis],
        )
        if input_height is None or input_width is None:
            raise ValueError(
                "RandomCrop requires the input to have a fully defined "
                f"height and width. Received: images.shape={input_shape}"
            )

        if training and input_height > self.height and input_width > self.width:
            # Uniform offsets in [0, input - target], inclusive.
            h_start = self.backend.cast(
                self.backend.random.uniform(
                    (),
                    0,
                    maxval=float(input_height - self.height + 1),
                    seed=seed,
                ),
                "int32",
            )
            w_start = self.backend.cast(
                self.backend.random.uniform(
                    (),
                    0,
                    maxval=float(input_width - self.width + 1),
                    seed=seed,
                ),
                "int32",
            )
        else:
            # Deterministic center crop of the largest window matching
            # the target aspect ratio (resize happens later if needed).
            crop_height = int(float(input_width * self.height) / self.width)
            crop_height = max(min(input_height, crop_height), 1)
            crop_width = int(float(input_height * self.width) / self.height)
            crop_width = max(min(input_width, crop_width), 1)
            h_start = int(float(input_height - crop_height) / 2)
            w_start = int(float(input_width - crop_width) / 2)
        return h_start, w_start

    def transform_images(self, images, transformation, training=True):
        """Crop to `(height, width)` at the sampled offsets.

        If the cropped window is not exactly the target size (or the size
        cannot be determined statically), the crop is resized to the
        target size.
        """
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            crop_box_hstart, crop_box_wstart = transformation
            crop_height = self.height
            crop_width = self.width

            if self.data_format == "channels_last":
                if len(images.shape) == 4:
                    images = images[
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                        :,
                    ]
                else:
                    images = images[
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                        :,
                    ]
            else:
                if len(images.shape) == 4:
                    images = images[
                        :,
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                    ]
                else:
                    images = images[
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                    ]

            shape = self.backend.shape(images)
            new_height = shape[self.height_axis]
            new_width = shape[self.width_axis]
            if (
                not isinstance(new_height, int)
                or not isinstance(new_width, int)
                or new_height != self.height
                or new_width != self.width
            ):
                # Resize images if size mismatch or
                # if size mismatch cannot be determined
                # (in the case of a TF dynamic shape).
                images = self.backend.image.resize(
                    images,
                    size=(self.height, self.width),
                    data_format=self.data_format,
                )
                # Resize may have upcasted the outputs
                images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Cropping does not change labels.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """
        bounding_boxes = {
            "boxes": (batch, num_boxes, 4),  # left-top-right-bottom (xyxy)
            "labels": (batch, num_boxes, num_classes),
        }
        or
        bounding_boxes = {
            "boxes": (num_boxes, 4),
            "labels": (num_boxes, num_classes),
        }
        """
        if training:
            h_start, w_start = transformation
            if not self.backend.is_tensor(bounding_boxes["boxes"]):
                bounding_boxes = densify_bounding_boxes(
                    bounding_boxes, backend=self.backend
                )
            boxes = bounding_boxes["boxes"]
            # Convert to a standard xyxy as operations are done xyxy by default.
            # NOTE(review): `height`/`width` here are the crop (target)
            # size, not the original image size — confirm this is the
            # intended reference frame for relative box formats.
            boxes = convert_format(
                boxes=boxes,
                source=self.bounding_box_format,
                target="xyxy",
                height=self.height,
                width=self.width,
            )
            h_start = self.backend.cast(h_start, boxes.dtype)
            w_start = self.backend.cast(w_start, boxes.dtype)
            # Shift x-coordinates (indices 0 and 2) by the horizontal
            # offset `w_start` and y-coordinates (indices 1 and 3) by the
            # vertical offset `h_start`. (Bug fix: the previous code
            # subtracted `h_start` from x and `w_start` from y, swapping
            # the axes.)
            if len(self.backend.shape(boxes)) == 3:
                boxes = self.backend.numpy.stack(
                    [
                        self.backend.numpy.maximum(boxes[:, :, 0] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 1] - h_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 2] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 3] - h_start, 0),
                    ],
                    axis=-1,
                )
            else:
                boxes = self.backend.numpy.stack(
                    [
                        self.backend.numpy.maximum(boxes[:, 0] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, 1] - h_start, 0),
                        self.backend.numpy.maximum(boxes[:, 2] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, 3] - h_start, 0),
                    ],
                    axis=-1,
                )

            # Convert to user defined bounding box format
            boxes = convert_format(
                boxes=boxes,
                source="xyxy",
                target=self.bounding_box_format,
                height=self.height,
                width=self.width,
            )

            return {
                "boxes": boxes,
                "labels": bounding_boxes["labels"],
            }
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are cropped exactly like the images.
        return self.transform_images(segmentation_masks, transformation)

    def compute_output_shape(self, input_shape, *args, **kwargs):
        # Spatial dimensions are replaced by the target crop size.
        input_shape = list(input_shape)
        input_shape[self.height_axis] = self.height
        input_shape[self.width_axis] = self.width
        return tuple(input_shape)

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = super().get_config()
        config.update(
            {
                "height": self.height,
                "width": self.width,
                "seed": self.seed,
                "data_format": self.data_format,
            }
        )
        return config
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomInvertTest(testing.TestCase):
    """Unit tests for `keras.layers.RandomInvert`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Generic layer-contract checks (serialization, shapes, masking).
        self.run_layer_test(
            layers.RandomInvert,
            init_kwargs={
                "factor": 0.75,
                "value_range": (20, 200),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_invert_inference(self):
        # With training=False the layer must act as the identity.
        seed = 3481
        layer = layers.RandomInvert()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_invert_no_op(self):
        # factor=0 means inversion is never applied.
        seed = 3481
        layer = layers.RandomInvert(factor=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    def test_random_invert_basic(self):
        # factor=(1, 1) always inverts: output == max - input for [0, 1].
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((1, 8, 8, 3))
        else:
            input_data = np.random.random((1, 3, 8, 8))
        layer = layers.RandomInvert(
            factor=(1, 1),
            value_range=[0, 1],
            data_format=data_format,
            seed=1337,
        )
        output = layer(input_data)
        self.assertAllClose(1 - input_data, output)

    def test_tf_data_compatibility(self):
        # The layer must be mappable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomInvert(
            factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
        )
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomElasticTransform")
class RandomElasticTransform(BaseImagePreprocessingLayer):
"""A preprocessing layer that applies random elastic transformations.
This layer distorts input images by applying elastic deformations,
simulating a physically realistic transformation. The magnitude of the
distortion is controlled by the `scale` parameter, while the `factor`
determines the probability of applying the transformation.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the probability of applying the transformation.
- `factor=0.0` ensures no erasing is applied.
- `factor=1.0` means erasing is always applied.
- If a tuple `(min, max)` is provided, a probability value
is sampled between `min` and `max` for each image.
- If a single float is provided, a probability is sampled
between `0.0` and the given float.
Default is 1.0.
scale: A float or a tuple of two floats defining the magnitude of
the distortion applied.
- If a tuple `(min, max)` is provided, a random scale value is
sampled within this range.
- If a single float is provided, a random scale value is sampled
between `0.0` and the given float.
Default is 1.0.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Note that when using torch backend, `"reflect"` is redirected to
`"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
support `"reflect"`.
Note that torch backend does not support `"wrap"`.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
_SUPPORTED_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def __init__(
self,
factor=1.0,
scale=1.0,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
value_range=(0, 255),
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.scale = self._set_factor_by_name(scale, "scale")
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.value_range = value_range
self.seed = seed
self.generator = SeedGenerator(seed)
if interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
if fill_mode not in self._SUPPORTED_FILL_MODES:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODES}."
)
if self.data_format == "channels_first":
self.height_axis = -2
self.width_axis = -1
self.channel_axis = -3
else:
self.height_axis = -3
self.width_axis = -2
self.channel_axis = -1
def _set_factor_by_name(self, factor, name):
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
"in the range "
f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
f"Received: factor={factor}"
)
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(error_msg)
if (
factor[0] > self._FACTOR_BOUNDS[1]
or factor[1] < self._FACTOR_BOUNDS[0]
):
raise ValueError(error_msg)
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
if (
factor < self._FACTOR_BOUNDS[0]
or factor > self._FACTOR_BOUNDS[1]
):
raise ValueError(error_msg)
factor = abs(factor)
lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
else:
raise ValueError(error_msg)
return lower, upper
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if (self.scale[1] == 0) or (self.factor[1] == 0):
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
unbatched = len(images_shape) == 3
if unbatched:
batch_size = 1
else:
batch_size = images_shape[0]
seed = seed or self._get_seed_generator(self.backend._backend)
transformation_probability = self.backend.random.uniform(
shape=(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
random_threshold = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
apply_transform = random_threshold < transformation_probability
distortion_factor = self.backend.random.uniform(
shape=(),
minval=self.scale[0],
maxval=self.scale[1],
seed=seed,
dtype=self.compute_dtype,
)
return {
"apply_transform": apply_transform,
"distortion_factor": distortion_factor,
"seed": seed,
}
def get_elastic_transform_params(self, height, width, factor):
alpha_scale = 0.1 * factor
sigma_scale = 0.05 * factor
alpha = max(height, width) * alpha_scale
sigma = min(height, width) * sigma_scale
return alpha, sigma
    def transform_images(self, images, transformation, training=True):
        """Apply the elastic transform to the images selected by the mask."""
        images = self.backend.cast(images, self.compute_dtype)
        if training and transformation is not None:
            apply_transform = transformation["apply_transform"]
            distortion_factor = transformation["distortion_factor"]
            seed = transformation["seed"]
            height, width = (
                images.shape[self.height_axis],
                images.shape[self.width_axis],
            )
            alpha, sigma = self.get_elastic_transform_params(
                height, width, distortion_factor
            )
            transformed_images = self.backend.image.elastic_transform(
                images,
                alpha=alpha,
                sigma=sigma,
                interpolation=self.interpolation,
                fill_mode=self.fill_mode,
                fill_value=self.fill_value,
                seed=seed,
                data_format=self.data_format,
            )
            # Broadcast the per-image boolean mask over the remaining axes so
            # `where` selects whole images.
            apply_transform = (
                apply_transform[:, None, None]
                if len(images.shape) == 3
                else apply_transform[:, None, None, None]
            )
            images = self.backend.numpy.where(
                apply_transform,
                transformed_images,
                images,
            )
            # Interpolation can overshoot the valid pixel range; clamp back.
            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
            images = self.backend.cast(images, self.compute_dtype)
        return images
    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by a purely spatial image distortion."""
        return labels
    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Warp segmentation masks with the same parameters as the images."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )
    def compute_output_shape(self, input_shape):
        # Elastic distortion is spatially in-place: the shape is preserved.
        return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"factor": self.factor,
"scale": self.scale,
"interpolation": self.interpolation,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"value_range": self.value_range,
"seed": self.seed,
}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/rand_augment.py | keras/src/layers/preprocessing/image_preprocessing/rand_augment.py | import keras.src.layers as layers
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandAugment")
class RandAugment(BaseImagePreprocessingLayer):
    """RandAugment performs the Rand Augment operation on input images.

    This layer can be thought of as an all-in-one image augmentation layer. The
    policy implemented by this layer has been benchmarked extensively and is
    effective on a wide variety of datasets.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    References:
        - [RandAugment](https://arxiv.org/abs/1909.13719)

    Args:
        value_range: The range of values the input image can take.
            Default is `(0, 255)`. Typically, this would be `(0, 1)`
            for normalized images or `(0, 255)` for raw images.
        num_ops: The number of augmentation operations to apply sequentially
            to each image. Default is 2.
        factor: The strength of the augmentation as a normalized value
            between 0 and 1. Default is 0.5.
        interpolation: The interpolation method to use for resizing operations.
            Options include `nearest`, `bilinear`. Default is `bilinear`.
        seed: Integer. Used to create a random seed.
    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)

    # Attribute names of the candidate augmentation layers built in
    # `__init__`; `num_ops` of these are chosen (by index) per call.
    _AUGMENT_LAYERS = [
        "random_shear",
        "random_translation",
        "random_rotation",
        "random_brightness",
        "random_color_degeneration",
        "random_contrast",
        "random_sharpness",
        "random_posterization",
        "solarization",
        "auto_contrast",
        "equalization",
    ]

    def __init__(
        self,
        value_range=(0, 255),
        num_ops=2,
        factor=0.5,
        interpolation="bilinear",
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self.value_range = value_range
        self.num_ops = num_ops
        self._set_factor(factor)
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # Instantiate every candidate layer once up front; the per-call
        # randomness only selects which layer's transformation to apply.
        self.random_shear = layers.RandomShear(
            x_factor=self.factor,
            y_factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_translation = layers.RandomTranslation(
            height_factor=self.factor,
            width_factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_rotation = layers.RandomRotation(
            factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_brightness = layers.RandomBrightness(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_color_degeneration = layers.RandomColorDegeneration(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_contrast = layers.RandomContrast(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.random_sharpness = layers.RandomSharpness(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.solarization = layers.Solarization(
            addition_factor=self.factor,
            threshold_factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        # Posterization takes an integer bit count; map the normalized
        # factor's upper bound onto [1, 8] bits.
        self.random_posterization = layers.RandomPosterization(
            factor=max(1, int(8 * self.factor[1])),
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )
        self.auto_contrast = layers.AutoContrast(
            value_range=self.value_range, data_format=data_format, **kwargs
        )
        self.equalization = layers.Equalization(
            value_range=self.value_range, data_format=data_format, **kwargs
        )

    def build(self, input_shape):
        """Build every candidate augmentation layer."""
        for layer_name in self._AUGMENT_LAYERS:
            augmentation_layer = getattr(self, layer_name)
            augmentation_layer.build(input_shape)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample the op indices and every candidate's transformation.

        Returns a dict with `layer_idxes` (the `num_ops` randomly chosen
        layer indices) and `transforms` (a precomputed transformation for
        every candidate layer, so that selection can happen tensor-side in
        `transform_images`).
        """
        if not training:
            return None
        if backend_utils.in_tf_graph():
            # Running inside a tf.data pipeline: switch this layer and all
            # of its children to the TensorFlow backend.
            self.backend.set_backend("tensorflow")
            for layer_name in self._AUGMENT_LAYERS:
                augmentation_layer = getattr(self, layer_name)
                augmentation_layer.backend.set_backend("tensorflow")
        layer_idxes = self.backend.random.randint(
            (self.num_ops,),
            0,
            len(self._AUGMENT_LAYERS),
            seed=self._get_seed_generator(self.backend._backend),
        )
        transformation = {}
        for layer_name in self._AUGMENT_LAYERS:
            augmentation_layer = getattr(self, layer_name)
            transformation[layer_name] = (
                augmentation_layer.get_random_transformation(
                    data,
                    training=training,
                    seed=self._get_seed_generator(self.backend._backend),
                )
            )
        return {
            "transforms": transformation,
            "layer_idxes": layer_idxes,
        }

    def transform_images(self, images, transformation, training=True):
        """Apply the `num_ops` randomly selected augmentations in sequence."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            layer_idxes = transformation["layer_idxes"]
            transforms = transformation["transforms"]
            for i in range(self.num_ops):
                # `where` keeps the computation graph static: every
                # candidate's output is computed, but only the selected
                # layer's result is kept for this op.
                for idx, (key, value) in enumerate(transforms.items()):
                    augmentation_layer = getattr(self, key)
                    images = self.backend.numpy.where(
                        layer_idxes[i] == idx,
                        augmentation_layer.transform_images(images, value),
                        images,
                    )
        images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by these image augmentations."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Apply the selected layers' bounding-box transformations."""
        if training:
            layer_idxes = transformation["layer_idxes"]
            transforms = transformation["transforms"]
            for idx, (key, value) in enumerate(transforms.items()):
                augmentation_layer = getattr(self, key)
                # Transform a copy of the current boxes with this candidate,
                # then merge it in wherever this candidate was selected.
                transformed_bounding_box = (
                    augmentation_layer.transform_bounding_boxes(
                        bounding_boxes.copy(), value
                    )
                )
                for i in range(self.num_ops):
                    bounding_boxes["boxes"] = self.backend.numpy.where(
                        layer_idxes[i] == idx,
                        transformed_bounding_box["boxes"],
                        bounding_boxes["boxes"],
                    )
                    bounding_boxes["labels"] = self.backend.numpy.where(
                        layer_idxes[i] == idx,
                        transformed_bounding_box["labels"],
                        bounding_boxes["labels"],
                    )
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Augment segmentation masks with the same selected ops."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def compute_output_shape(self, input_shape):
        # All candidate augmentations preserve the input shape.
        return input_shape

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "value_range": self.value_range,
            "num_ops": self.num_ops,
            "factor": self.factor,
            "interpolation": self.interpolation,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py | keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.MaxNumBoundingBoxes")
class MaxNumBoundingBoxes(BaseImagePreprocessingLayer):
    """Ensure the maximum number of bounding boxes.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        max_number: Desired output number of bounding boxes.
        fill_value: The padding value used to fill the `boxes` and `labels`
            entries in `bounding_boxes` when fewer than `max_number` boxes
            are present. Defaults to `-1`.
    """

    def __init__(self, max_number, fill_value=-1, **kwargs):
        super().__init__(**kwargs)
        self.max_number = int(max_number)
        self.fill_value = int(fill_value)

    def transform_images(self, images, transformation=None, training=True):
        """Images pass through unchanged; only bounding boxes are altered."""
        return images

    def transform_labels(self, labels, transformation=None, training=True):
        """Labels pass through unchanged."""
        return labels

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Truncate or pad `bounding_boxes` to exactly `max_number` entries.

        Boxes beyond `max_number` are dropped; missing entries are padded
        with `fill_value` in both `boxes` and `labels`.
        """
        ops = self.backend
        boxes = bounding_boxes["boxes"]
        labels = bounding_boxes["labels"]
        boxes_shape = ops.shape(boxes)
        batch_size = boxes_shape[0]
        num_boxes = boxes_shape[1]
        # Number of padding rows needed (0 when there are already enough).
        pad_size = ops.numpy.maximum(
            ops.numpy.subtract(self.max_number, num_boxes), 0
        )
        boxes = boxes[:, : self.max_number, ...]
        boxes = ops.numpy.pad(
            boxes,
            [[0, 0], [0, pad_size], [0, 0]],
            constant_values=self.fill_value,
        )
        labels = labels[:, : self.max_number]
        labels = ops.numpy.pad(
            labels, [[0, 0], [0, pad_size]], constant_values=self.fill_value
        )
        # Ensure static (batch, max_number, 4) / (batch, max_number) shapes.
        boxes = ops.numpy.reshape(boxes, [batch_size, self.max_number, 4])
        labels = ops.numpy.reshape(labels, [batch_size, self.max_number])
        bounding_boxes = bounding_boxes.copy()
        bounding_boxes["boxes"] = boxes
        bounding_boxes["labels"] = labels
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation=None, training=True
    ):
        """Segmentation masks pass through unchanged."""
        return self.transform_images(segmentation_masks)

    def compute_output_shape(self, input_shape):
        """Set the box dimension of any `bounding_boxes` entry to `max_number`."""
        if isinstance(input_shape, dict) and "bounding_boxes" in input_shape:
            input_keys = set(input_shape["bounding_boxes"].keys())
            extra_keys = input_keys - set(("boxes", "labels"))
            if extra_keys:
                raise KeyError(
                    "There are unsupported keys in `bounding_boxes`: "
                    f"{list(extra_keys)}. "
                    "Only `boxes` and `labels` are supported."
                )
            boxes_shape = list(input_shape["bounding_boxes"]["boxes"])
            boxes_shape[1] = self.max_number
            labels_shape = list(input_shape["bounding_boxes"]["labels"])
            labels_shape[1] = self.max_number
            input_shape["bounding_boxes"]["boxes"] = boxes_shape
            input_shape["bounding_boxes"]["labels"] = labels_shape
        return input_shape

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "max_number": self.max_number,
                # Serialize `fill_value` too so the layer round-trips through
                # `get_config()`/`from_config()` without silently reverting to
                # the default padding value.
                "fill_value": self.fill_value,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py | keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
    """Preprocessing layer for random conversion of RGB images to grayscale.

    This layer randomly converts input images to grayscale with a specified
    factor. When applied, it maintains the original number of channels
    but sets all channels to the same grayscale value. This can be useful
    for data augmentation and training models to be robust to color
    variations.

    The conversion preserves the perceived luminance of the original color
    image using standard RGB to grayscale conversion coefficients. Images
    that are not selected for conversion remain unchanged.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: Float between 0 and 1, specifying the factor of
            converting each image to grayscale. Defaults to 0.5. A value of
            1.0 means all images will be converted, while 0.0 means no images
            will be converted.
        data_format: String, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch, channels, height, width)`.

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        Same as input shape. The output maintains the same number of channels
        as the input, even for grayscale-converted images where all channels
        will have the same value.
    """

    def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        if factor < 0 or factor > 1:
            raise ValueError(
                f"`factor` should be between 0 and 1. Received: factor={factor}"
            )
        self.factor = factor
        self.data_format = backend.standardize_data_format(data_format)
        self.seed = seed
        self.generator = self.backend.random.SeedGenerator(seed)

    def get_random_transformation(self, images, training=True, seed=None):
        """Sample a per-image boolean "convert to grayscale" mask."""
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # Base case: Unbatched data
        batch_size = 1
        if len(images.shape) == 4:
            # This is a batch of images (4D input)
            batch_size = self.backend.core.shape(images)[0]
        random_values = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0,
            maxval=1,
            seed=seed,
        )
        # NOTE(review): for unbatched 3D inputs this mask has shape
        # (1, 1, 1, 1); broadcasting against an (H, W, C) image would promote
        # the result to rank 4 — confirm unbatched inputs are normalized
        # upstream before this is used.
        should_apply = self.backend.numpy.expand_dims(
            random_values < self.factor, axis=[1, 2, 3]
        )
        return should_apply

    def transform_images(self, images, transformation, training=True):
        """Replace the selected images with their grayscale versions."""
        if training:
            should_apply = (
                transformation
                if transformation is not None
                else self.get_random_transformation(images)
            )
            grayscale_images = self.backend.image.rgb_to_grayscale(
                images, data_format=self.data_format
            )
            return self.backend.numpy.where(
                should_apply, grayscale_images, images
            )
        return images

    def compute_output_shape(self, input_shape):
        # Grayscale conversion keeps the channel count, so shape is unchanged.
        return input_shape

    def compute_output_spec(self, inputs, **kwargs):
        return backend.KerasTensor(
            inputs.shape, dtype=inputs.dtype, sparse=inputs.sparse
        )

    def transform_bounding_boxes(self, bounding_boxes, **kwargs):
        """Bounding boxes are unaffected by a color-space change."""
        return bounding_boxes

    def transform_labels(self, labels, transformations=None, **kwargs):
        """Labels pass through unchanged."""
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformations=None, **kwargs
    ):
        """Segmentation masks pass through unchanged."""
        return segmentation_masks

    def get_config(self):
        config = super().get_config()
        # Serialize `seed` as well so the layer round-trips through
        # `get_config()`/`from_config()` without losing its RNG configuration.
        config.update({"factor": self.factor, "seed": self.seed})
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/center_crop.py | keras/src/layers/preprocessing/image_preprocessing/center_crop.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.utils import image_utils
@keras_export("keras.layers.CenterCrop")
class CenterCrop(BaseImagePreprocessingLayer):
    """A preprocessing layer which crops images.

    This layers crops the central portion of the images to a target size. If an
    image is smaller than the target size, it will be resized and cropped
    so as to return the largest possible window in the image that matches
    the target aspect ratio.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`,
        or `(..., channels, target_height, target_width)`,
        in `"channels_first"` format.

    If the input height/width is even and the target height/width is odd (or
    inversely), the input image is left-padded by 1 pixel.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        height: Integer, the height of the output shape.
        width: Integer, the width of the output shape.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
    """

    _USE_BASE_FACTOR = False

    def __init__(self, height, width, data_format=None, **kwargs):
        super().__init__(data_format=data_format, **kwargs)
        self.height = height
        self.width = width

    def get_random_transformation(self, data, training=True, seed=None):
        """Record the input shape; the crop itself is deterministic."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        shape = self.backend.core.shape(images)
        return {"input_shape": shape}

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by cropping."""
        return labels

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Remap bounding boxes into the coordinate space of the crop."""

        def _get_height_width(input_shape):
            # Spatial dimensions depend on the data format.
            if self.data_format == "channels_first":
                input_height = input_shape[-2]
                input_width = input_shape[-1]
            else:
                input_height = input_shape[-3]
                input_width = input_shape[-2]
            return input_height, input_width

        def _get_clipped_bbox(bounding_boxes, h_end, h_start, w_end, w_start):
            # Clip each corner to the crop window, then shift into crop
            # coordinates.
            bboxes = bounding_boxes["boxes"]
            x1, y1, x2, y2 = self.backend.numpy.split(bboxes, 4, axis=-1)
            x1 = self.backend.numpy.clip(x1, w_start, w_end) - w_start
            y1 = self.backend.numpy.clip(y1, h_start, h_end) - h_start
            x2 = self.backend.numpy.clip(x2, w_start, w_end) - w_start
            y2 = self.backend.numpy.clip(y2, h_start, h_end) - h_start
            bounding_boxes["boxes"] = self.backend.numpy.concatenate(
                [x1, y1, x2, y2], axis=-1
            )
            return bounding_boxes

        input_shape = transformation["input_shape"]
        init_height, init_width = _get_height_width(input_shape)
        # Work in absolute pixel coordinates.
        bounding_boxes = convert_format(
            bounding_boxes,
            source=self.bounding_box_format,
            target="xyxy",
            height=init_height,
            width=init_width,
        )
        h_diff = init_height - self.height
        w_diff = init_width - self.width
        if h_diff >= 0 and w_diff >= 0:
            # Simple central crop: the image is at least as large as the
            # target in both dimensions.
            h_start = int(h_diff / 2)
            w_start = int(w_diff / 2)
            h_end = h_start + self.height
            w_end = w_start + self.width
            bounding_boxes = _get_clipped_bbox(
                bounding_boxes, h_end, h_start, w_end, w_start
            )
        else:
            # Image smaller than the target in some dimension: mirror the
            # `smart_resize` path — crop the largest window matching the
            # target aspect ratio, then rescale boxes to the target size.
            width = init_width
            height = init_height
            target_height = self.height
            target_width = self.width
            crop_height = int(float(width * target_height) / target_width)
            crop_height = max(min(height, crop_height), 1)
            crop_width = int(float(height * target_width) / target_height)
            crop_width = max(min(width, crop_width), 1)
            crop_box_hstart = int(float(height - crop_height) / 2)
            crop_box_wstart = int(float(width - crop_width) / 2)
            h_start = crop_box_hstart
            w_start = crop_box_wstart
            h_end = crop_box_hstart + crop_height
            w_end = crop_box_wstart + crop_width
            bounding_boxes = _get_clipped_bbox(
                bounding_boxes, h_end, h_start, w_end, w_start
            )
            # Rescale from crop-window pixels to target-size pixels via
            # relative coordinates.
            bounding_boxes = convert_format(
                bounding_boxes,
                source="xyxy",
                target="rel_xyxy",
                height=crop_height,
                width=crop_width,
            )
            bounding_boxes = convert_format(
                bounding_boxes,
                source="rel_xyxy",
                target="xyxy",
                height=self.height,
                width=self.width,
            )
        bounding_boxes = clip_to_image_size(
            bounding_boxes=bounding_boxes,
            height=self.height,
            width=self.width,
            bounding_box_format="xyxy",
        )
        # Convert back to the user's bounding box format.
        bounding_boxes = convert_format(
            bounding_boxes,
            source="xyxy",
            target=self.bounding_box_format,
            height=self.height,
            width=self.width,
        )
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Crop segmentation masks exactly like the images."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def transform_images(self, images, transformation=None, training=True):
        """Center-crop (or resize-then-crop) the images to (height, width)."""
        inputs = self.backend.cast(images, self.compute_dtype)
        if self.data_format == "channels_first":
            init_height = inputs.shape[-2]
            init_width = inputs.shape[-1]
        else:
            init_height = inputs.shape[-3]
            init_width = inputs.shape[-2]
        if init_height is None or init_width is None:
            # Dynamic size case. TODO.
            raise ValueError(
                "At this time, CenterCrop can only "
                "process images with a static spatial "
                f"shape. Received: inputs.shape={inputs.shape}"
            )
        h_diff = init_height - self.height
        w_diff = init_width - self.width
        h_start = int(h_diff / 2)
        w_start = int(w_diff / 2)
        if h_diff >= 0 and w_diff >= 0:
            # Plain slicing central crop, for each rank/format combination.
            if len(inputs.shape) == 4:
                if self.data_format == "channels_first":
                    return inputs[
                        :,
                        :,
                        h_start : h_start + self.height,
                        w_start : w_start + self.width,
                    ]
                return inputs[
                    :,
                    h_start : h_start + self.height,
                    w_start : w_start + self.width,
                    :,
                ]
            elif len(inputs.shape) == 3:
                if self.data_format == "channels_first":
                    return inputs[
                        :,
                        h_start : h_start + self.height,
                        w_start : w_start + self.width,
                    ]
                return inputs[
                    h_start : h_start + self.height,
                    w_start : w_start + self.width,
                    :,
                ]
        # Image smaller than the target in some dimension: crop the largest
        # aspect-ratio-matching window and resize it to the target size.
        return image_utils.smart_resize(
            inputs,
            [self.height, self.width],
            data_format=self.data_format,
            backend_module=self.backend,
        )

    def compute_output_shape(self, input_shape):
        """Replace the spatial dims of `input_shape` with (height, width)."""
        input_shape = list(input_shape)
        if isinstance(input_shape[0], (list, tuple)) or len(
            input_shape
        ) not in (3, 4):
            raise ValueError(
                "`input_shape` must be a non-nested tuple or list "
                "of rank-1 with size 3 (unbatched) or 4 (batched). "
            )
        if len(input_shape) == 4:
            if self.data_format == "channels_last":
                input_shape[1] = self.height
                input_shape[2] = self.width
            else:
                input_shape[2] = self.height
                input_shape[3] = self.width
        else:
            if self.data_format == "channels_last":
                input_shape[0] = self.height
                input_shape[1] = self.width
            else:
                input_shape[1] = self.height
                input_shape[2] = self.width
        return tuple(input_shape)

    def get_config(self):
        """Return the layer configuration for serialization."""
        base_config = super().get_config()
        config = {
            "height": self.height,
            "width": self.width,
            "data_format": self.data_format,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_crop_test.py | keras/src/layers/preprocessing/image_preprocessing/random_crop_test.py | import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomCropTest(testing.TestCase):
    """Unit tests for the `RandomCrop` preprocessing layer."""

    def test_random_crop(self):
        # Output shape checks for batched/unbatched inputs in both
        # channels_last and channels_first formats.
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 2,
                "width": 2,
                "data_format": "channels_last",
            },
            input_shape=(1, 3, 4, 3),
            supports_masking=False,
            run_training_check=False,
            expected_output_shape=(1, 2, 2, 3),
        )
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 2,
                "width": 2,
                "data_format": "channels_last",
            },
            input_shape=(3, 4, 3),
            supports_masking=False,
            run_training_check=False,
            expected_output_shape=(2, 2, 3),
        )
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 2,
                "width": 2,
                "data_format": "channels_first",
            },
            input_shape=(1, 3, 3, 4),
            supports_masking=False,
            run_training_check=False,
            expected_output_shape=(1, 3, 2, 2),
        )
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 2,
                "width": 2,
                "data_format": "channels_first",
            },
            input_shape=(3, 3, 4),
            supports_masking=False,
            run_training_check=False,
            expected_output_shape=(3, 2, 2),
        )

    def test_random_crop_full(self):
        # Crop size equal to the input size at inference time is a no-op.
        np.random.seed(1337)
        height, width = 8, 16
        if backend.config.image_data_format() == "channels_last":
            input_shape = (12, 8, 16, 3)
        else:
            input_shape = (12, 3, 8, 16)
        inp = np.random.random(input_shape)
        layer = layers.RandomCrop(height, width)
        actual_output = layer(inp, training=False)
        self.assertAllClose(inp, actual_output)

    def test_random_crop_partial(self):
        # Crop narrower than the input width.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (12, 8, 16, 3)
            output_shape = (12, 8, 8, 3)
        else:
            input_shape = (12, 3, 8, 16)
            output_shape = (12, 3, 8, 8)
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 8,
                "width": 8,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            supports_masking=False,
            run_training_check=False,
        )

    def test_predicting_with_longer_height(self):
        # Target height exceeds the input height; layer must upsample.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (12, 8, 16, 3)
            output_shape = (12, 10, 8, 3)
        else:
            input_shape = (12, 3, 8, 16)
            output_shape = (12, 3, 10, 8)
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 10,
                "width": 8,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            supports_masking=False,
            run_training_check=False,
        )

    def test_predicting_with_longer_width(self):
        # Target width exceeds the input width; layer must upsample.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (12, 8, 16, 3)
            output_shape = (12, 8, 18, 3)
        else:
            input_shape = (12, 3, 8, 16)
            output_shape = (12, 3, 8, 18)
        self.run_layer_test(
            layers.RandomCrop,
            init_kwargs={
                "height": 8,
                "width": 18,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            supports_masking=False,
            run_training_check=False,
        )

    def test_tf_data_compatibility(self):
        # The layer must be usable as a tf.data map function.
        layer = layers.RandomCrop(8, 9)
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        input_data = np.random.random(input_shape)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertEqual(tuple(output.shape), output_shape)

    def test_dict_input(self):
        # Dict inputs (images, labels, masks, bounding boxes) must all be
        # handled; labels pass through unchanged.
        layer = layers.RandomCrop(
            3, 3, data_format="channels_last", bounding_box_format="xyxy"
        )
        data = {
            "images": np.random.random((2, 4, 5, 3)),
            "labels": np.random.random((2, 7)),
            "segmentation_masks": np.random.random((2, 4, 5, 7)),
            "bounding_boxes": {
                "boxes": np.array([[1, 2, 2, 3]]),
                "labels": np.array([0]),
            },
        }
        transformed_data = layer(data)
        self.assertEqual(
            data["images"].shape[:-1],
            transformed_data["segmentation_masks"].shape[:-1],
        )
        self.assertAllClose(data["labels"], transformed_data["labels"])
        self.assertEqual(data["bounding_boxes"]["boxes"].shape, (1, 4))
        self.assertAllClose(
            data["bounding_boxes"]["labels"],
            transformed_data["bounding_boxes"]["labels"],
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py | keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
This layer is active at both training and inference time.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
    def compute_output_shape(self, input_shape):
        """Output shape equals input shape; only pixel values change."""
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py | class XYXY:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
class REL_XYXY:
    """REL_XYXY contains axis indices for the REL_XYXY format.

    REL_XYXY is like XYXY, but each value is relative to the width and height
    of the origin image. Values are percentages of the origin images' width
    and height respectively.

    The REL_XYXY format consists of the following required indices:

    - LEFT: left of the bounding box
    - TOP: top of the bounding box
    - RIGHT: right of the bounding box
    - BOTTOM: bottom of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    LEFT = 0
    TOP = 1
    RIGHT = 2
    BOTTOM = 3
class CENTER_XYWH:
    """CENTER_XYWH contains axis indices for the CENTER_XYWH format.

    All values in the CENTER_XYWH format should be absolute pixel values.

    The CENTER_XYWH format consists of the following required indices:

    - X: X coordinate of the center of the bounding box
    - Y: Y coordinate of the center of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3
class XYWH:
    """XYWH contains axis indices for the XYWH format.

    All values in the XYWH format should be absolute pixel values.

    The XYWH format consists of the following required indices:

    - X: X coordinate of the left of the bounding box
    - Y: Y coordinate of the top of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3
class REL_XYWH:
    """REL_XYWH contains axis indices for the REL_XYWH format.

    REL_XYWH is like XYWH, but each value is relative to the width and height
    of the origin image. Values are percentages of the origin images' width
    and height respectively.

    The REL_XYWH format consists of the following required indices:

    - X: X coordinate of the left of the bounding box
    - Y: Y coordinate of the top of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3
class YXYX:
    """YXYX contains axis indices for the YXYX format.

    All values in the YXYX format should be absolute pixel values.

    The YXYX format consists of the following required indices:

    - TOP: top of the bounding box
    - LEFT: left of the bounding box
    - BOTTOM: bottom of the bounding box
    - RIGHT: right of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    TOP = 0
    LEFT = 1
    BOTTOM = 2
    RIGHT = 3
class REL_YXYX:
    """REL_YXYX contains axis indices for the REL_YXYX format.

    REL_YXYX is like YXYX, but each value is relative to the width and height
    of the origin image. Values are percentages of the origin images' width
    and height respectively.

    The REL_YXYX format consists of the following required indices:

    - TOP: top of the bounding box
    - LEFT: left of the bounding box
    - BOTTOM: bottom of the bounding box
    - RIGHT: right of the bounding box
    """

    # Indices into the last axis of a boxes tensor.
    TOP = 0
    LEFT = 1
    BOTTOM = 2
    RIGHT = 3
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou.py | """Contains functions to compute ious of bounding boxes."""
import math
import keras
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (
converters,
)
def _compute_area(box):
    """Computes area for bounding boxes.

    Args:
        box: [N, 4] or [batch_size, N, 4] float Tensor, either batched
            or unbatched boxes, in `yxyx` coordinate order.

    Returns:
        a float Tensor of [N] or [batch_size, N]
    """
    corners = ops.split(box[..., :4], 4, axis=-1)
    y_min, x_min, y_max, x_max = corners
    # Squeeze the singleton coordinate axis from each side length before
    # multiplying; the product is the same as squeezing afterwards.
    heights = ops.squeeze(y_max - y_min, axis=-1)
    widths = ops.squeeze(x_max - x_min, axis=-1)
    return heights * widths
def _compute_intersection(boxes1, boxes2):
    """Computes intersection area between two sets of boxes.

    Args:
        boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.
        boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.

    Returns:
        a [N, M] or [batch_size, N, M] float Tensor.
    """
    y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
    y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
    # Transpose boxes2's coordinates so broadcasting produces the full
    # N x M pairwise grid (batch axis, if any, is preserved).
    boxes2_rank = len(boxes2.shape)
    perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]
    # [N, M] or [batch_size, N, M]
    intersect_ymax = ops.minimum(y_max1, ops.transpose(y_max2, perm))
    intersect_ymin = ops.maximum(y_min1, ops.transpose(y_min2, perm))
    intersect_xmax = ops.minimum(x_max1, ops.transpose(x_max2, perm))
    intersect_xmin = ops.maximum(x_min1, ops.transpose(x_min2, perm))
    intersect_height = intersect_ymax - intersect_ymin
    intersect_width = intersect_xmax - intersect_xmin
    # Clamp negative extents (disjoint boxes) to zero area.
    zeros_t = ops.cast(0, intersect_height.dtype)
    intersect_height = ops.maximum(zeros_t, intersect_height)
    intersect_width = ops.maximum(zeros_t, intersect_width)
    return intersect_height * intersect_width
@keras_export("keras.utils.bounding_boxes.compute_iou")
def compute_iou(
    boxes1,
    boxes2,
    bounding_box_format,
    use_masking=False,
    mask_val=-1,
    image_shape=None,
):
    """Computes a lookup table vector containing the ious for a given set boxes.

    The lookup vector is to be indexed by [`boxes1_index`,`boxes2_index`] if
    boxes are unbatched and by [`batch`, `boxes1_index`,`boxes2_index`] if the
    boxes are batched.

    The users can pass `boxes1` and `boxes2` to be different ranks. For example:
    1) `boxes1`: [batch_size, M, 4], `boxes2`: [batch_size, N, 4] -> return
        [batch_size, M, N].
    2) `boxes1`: [batch_size, M, 4], `boxes2`: [N, 4] -> return
        [batch_size, M, N]
    3) `boxes1`: [M, 4], `boxes2`: [batch_size, N, 4] -> return
        [batch_size, M, N]
    4) `boxes1`: [M, 4], `boxes2`: [N, 4] -> return [M, N]

    Args:
        boxes1: a list of bounding boxes in 'corners' format. Can be batched or
            unbatched.
        boxes2: a list of bounding boxes in 'corners' format. Can be batched or
            unbatched.
        bounding_box_format: a case-insensitive string which is one of `"xyxy"`,
            `"rel_xyxy"`, `"xyWH"`, `"center_xyWH"`, `"yxyx"`, `"rel_yxyx"`.
            For detailed information on the supported formats, see the
            Keras bounding box documentation.
        use_masking: whether masking will be applied. This will mask all
            `boxes1` or `boxes2` that have values less than 0 in all its 4
            dimensions. Default to `False`.
        mask_val: int to mask those returned IOUs if the masking is True,
            defaults to -1.
        image_shape: `Tuple[int]`. The shape of the image (height, width, 3).
            When using relative bounding box format for `box_format` the
            `image_shape` is used for normalization.

    Returns:
        iou_lookup_table: a vector containing the pairwise ious of boxes1 and
            boxes2.
    """  # noqa: E501
    boxes1_rank = len(ops.shape(boxes1))
    boxes2_rank = len(ops.shape(boxes2))
    if boxes1_rank not in [2, 3]:
        raise ValueError(
            "compute_iou() expects boxes1 to be batched, or to be unbatched. "
            f"Received len(boxes1.shape)={boxes1_rank}, "
            f"len(boxes2.shape)={boxes2_rank}. Expected either "
            "len(boxes1.shape)=2 AND or len(boxes1.shape)=3."
        )
    if boxes2_rank not in [2, 3]:
        raise ValueError(
            "compute_iou() expects boxes2 to be batched, or to be unbatched. "
            f"Received len(boxes1.shape)={boxes1_rank}, "
            f"len(boxes2.shape)={boxes2_rank}. Expected either "
            "len(boxes2.shape)=2 AND or len(boxes2.shape)=3."
        )
    # All internal math is done in `yxyx` corner coordinates.
    target_format = "yxyx"
    if "rel" in bounding_box_format and image_shape is None:
        raise ValueError(
            "When using relative bounding box formats (e.g. `rel_yxyx`) "
            "the `image_shape` argument must be provided."
            f"Received `image_shape`: {image_shape}"
        )
    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape
    boxes1 = converters.convert_format(
        boxes1,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )
    boxes2 = converters.convert_format(
        boxes2,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )
    # IoU = intersection / (area1 + area2 - intersection).
    intersect_area = _compute_intersection(boxes1, boxes2)
    boxes1_area = _compute_area(boxes1)
    boxes2_area = _compute_area(boxes2)
    boxes2_area_rank = len(boxes2_area.shape)
    boxes2_axis = 1 if (boxes2_area_rank == 2) else 0
    boxes1_area = ops.expand_dims(boxes1_area, axis=-1)
    boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)
    union_area = boxes1_area + boxes2_area - intersect_area
    # Epsilon guards against division by zero for degenerate boxes.
    res = ops.divide(intersect_area, union_area + backend.epsilon())
    # Permutation used to align boxes2's mask with the [.., N, M] IoU grid.
    if boxes1_rank == 2:
        perm = [1, 0]
    else:
        perm = [0, 2, 1]
    if not use_masking:
        return res
    # Mask out rows/columns belonging to all-negative ("background") boxes.
    mask_val_t = ops.cast(mask_val, res.dtype) * ops.ones_like(res)
    boxes1_mask = ops.less(ops.max(boxes1, axis=-1, keepdims=True), 0.0)
    boxes2_mask = ops.less(ops.max(boxes2, axis=-1, keepdims=True), 0.0)
    background_mask = ops.logical_or(
        boxes1_mask, ops.transpose(boxes2_mask, perm)
    )
    iou_lookup_table = ops.where(background_mask, mask_val_t, res)
    return iou_lookup_table
@keras_export("keras.utils.bounding_boxes.compute_ciou")
def compute_ciou(boxes1, boxes2, bounding_box_format, image_shape=None):
    """
    Computes the Complete IoU (CIoU) between two bounding boxes or between
    two batches of bounding boxes.

    CIoU loss is an extension of GIoU loss, which further improves the IoU
    optimization for object detection. CIoU loss not only penalizes the
    bounding box coordinates but also considers the aspect ratio and center
    distance of the boxes. The length of the last dimension should be 4 to
    represent the bounding boxes.

    Args:
        boxes1 (tensor): tensor representing the first bounding box with
            shape (..., 4).
        boxes2 (tensor): tensor representing the second bounding box with
            shape (..., 4).
        bounding_box_format: a case-insensitive string (for example, "xyxy").
            Each bounding box is defined by these 4 values. For detailed
            information on the supported formats, see the [KerasCV bounding box
            documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
        image_shape: `Tuple[int]`. The shape of the image (height, width, 3).
            When using relative bounding box format for `box_format` the
            `image_shape` is used for normalization.

    Returns:
        tensor: The CIoU distance between the two bounding boxes.

    Raises:
        ValueError: If `bounding_box_format` is a relative format
            (contains `"rel"`); relative formats are not supported here.
    """
    # All internal math is done in `xyxy` corner coordinates.
    target_format = "xyxy"
    if "rel" in bounding_box_format:
        # Bug fix: the previous message (copied from `compute_iou`) claimed
        # `image_shape` was missing; in fact relative formats are rejected
        # unconditionally by this function.
        raise ValueError(
            "Relative bounding box formats (e.g. `rel_yxyx`) are not "
            "supported by `compute_ciou`. "
            f"Received bounding_box_format={bounding_box_format}"
        )
    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape
    boxes1 = converters.convert_format(
        boxes1,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )
    boxes2 = converters.convert_format(
        boxes2,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )
    x_min1, y_min1, x_max1, y_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
    x_min2, y_min2, x_max2, y_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
    # Epsilon keeps the aspect-ratio term's divisions finite for
    # zero-height boxes.
    width_1 = x_max1 - x_min1
    height_1 = y_max1 - y_min1 + keras.backend.epsilon()
    width_2 = x_max2 - x_min2
    height_2 = y_max2 - y_min2 + keras.backend.epsilon()
    intersection_area = ops.maximum(
        ops.minimum(x_max1, x_max2) - ops.maximum(x_min1, x_min2), 0
    ) * ops.maximum(
        ops.minimum(y_max1, y_max2) - ops.maximum(y_min1, y_min2), 0
    )
    union_area = (
        width_1 * height_1
        + width_2 * height_2
        - intersection_area
        + keras.backend.epsilon()
    )
    iou = ops.squeeze(
        ops.divide(intersection_area, union_area + keras.backend.epsilon()),
        axis=-1,
    )
    # Smallest enclosing box; its diagonal normalizes the center distance.
    convex_width = ops.maximum(x_max1, x_max2) - ops.minimum(x_min1, x_min2)
    convex_height = ops.maximum(y_max1, y_max2) - ops.minimum(y_min1, y_min2)
    convex_diagonal_squared = ops.squeeze(
        convex_width**2 + convex_height**2 + keras.backend.epsilon(),
        axis=-1,
    )
    centers_distance_squared = ops.squeeze(
        ((x_min1 + x_max1) / 2 - (x_min2 + x_max2) / 2) ** 2
        + ((y_min1 + y_max1) / 2 - (y_min2 + y_max2) / 2) ** 2,
        axis=-1,
    )
    # Aspect-ratio consistency term (v) and its trade-off weight (alpha).
    v = ops.squeeze(
        (4 / math.pi**2)
        * ops.power(
            (ops.arctan(width_2 / height_2) - ops.arctan(width_1 / height_1)),
            2,
        ),
        axis=-1,
    )
    alpha = v / (v - iou + (1 + keras.backend.epsilon()))
    return iou - (
        centers_distance_squared / convex_diagonal_squared + v * alpha
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.bounding_box import ( # noqa: E501
BoundingBox,
)
from keras.src.utils import backend_utils
@keras_export("keras.utils.bounding_boxes.convert_format")
def convert_format(
    boxes, source, target, height=None, width=None, dtype="float32"
):
    """Converts bounding boxes between formats.

    Supported formats (case-insensitive):
    `"xyxy"`: [left, top, right, bottom]
    `"yxyx"`: [top, left, bottom, right]
    `"xywh"`: [left, top, width, height]
    `"center_xywh"`: [center_x, center_y, width, height]
    `"center_yxhw"`: [center_y, center_x, height, width]
    `"rel_xyxy"`, `"rel_yxyx"`, `"rel_xywh"`, `"rel_center_xywh"`: Relative
        versions of the above formats, where coordinates are normalized
        to the range [0, 1] based on the image `height` and `width`.

    Args:
        boxes: Bounding boxes tensor/array or dictionary of `boxes` and
            `labels`.
        source: Source format string.
        target: Target format string.
        height: Image height (required for relative target format).
        width: Image width (required for relative target format).
        dtype: Data type for conversion (optional).

    Returns:
        Converted boxes.

    Raises:
        ValueError: For invalid formats, shapes, or missing dimensions.

    Example:
    ```python
    boxes = np.array([[10, 20, 30, 40], [50, 60, 70, 80]])
    # Convert from 'xyxy' to 'xywh' format
    boxes_xywh = keras.utils.bounding_boxes.convert_format(
        boxes, source='xyxy', target='xywh'
    )  # Output: [[10. 20. 20. 20.], [50. 60. 20. 20.]]

    # Convert to relative 'rel_xyxy' format
    boxes_rel_xyxy = keras.utils.bounding_boxes.convert_format(
        boxes, source='xyxy', target='rel_xyxy', height=200, width=300
    )  # Output: [[0.03333334 0.1        0.1        0.2       ],
       # [0.16666667 0.3        0.23333333 0.4       ]]
    ```
    """
    # Delegate the actual math to the BoundingBox helper.
    box_utils = BoundingBox()
    # Switch to tensorflow backend if we are in tf.data pipe
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    boxes = box_utils.convert_format(
        boxes=boxes,
        source=source,
        target=target,
        height=height,
        width=width,
        dtype=dtype,
    )
    # Switch back to original backend
    box_utils.backend.reset()
    return boxes
@keras_export("keras.utils.bounding_boxes.clip_to_image_size")
def clip_to_image_size(
    bounding_boxes, height=None, width=None, bounding_box_format="xyxy"
):
    """Clips bounding boxes to be within the image dimensions.

    Args:
        bounding_boxes: A dictionary with 'boxes' shape `(N, 4)` or
            `(batch, N, 4)` and 'labels' shape `(N,)` or `(batch, N,)`.
        height: Image height.
        width: Image width.
        bounding_box_format: The format of the input bounding boxes. Defaults
            to `"xyxy"`.

    Returns:
        Clipped bounding boxes.

    Example:
    ```python
    boxes = {"boxes": np.array([[-10, -20, 150, 160], [50, 40, 70, 80]]),
             "labels": np.array([0, 1])}
    clipped_boxes = keras.utils.bounding_boxes.clip_to_image_size(
        boxes, height=100, width=120,
    )
    # Output will have boxes clipped to the image boundaries, and labels
    # potentially adjusted if the clipped area becomes zero
    ```
    """
    # Delegate the actual clipping to the BoundingBox helper.
    box_utils = BoundingBox()
    # Switch to tensorflow backend if we are in tf.data pipe
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    bounding_boxes = box_utils.clip_to_image_size(
        bounding_boxes,
        height=height,
        width=width,
        bounding_box_format=bounding_box_format,
    )
    # Switch back to original backend
    box_utils.backend.reset()
    return bounding_boxes
@keras_export("keras.utils.bounding_boxes.affine_transform")
def affine_transform(
    boxes,
    angle,
    translate_x,
    translate_y,
    scale,
    shear_x,
    shear_y,
    height,
    width,
    center_x=None,
    center_y=None,
    bounding_box_format="xyxy",
):
    """Applies an affine transformation to the bounding boxes.

    The `height` and `width` parameters are used to normalize the
    translation and scaling factors.

    Args:
        boxes: The bounding boxes to transform, a tensor/array of shape
            `(N, 4)` or `(batch_size, N, 4)`.
        angle: Rotation angle in degrees.
        translate_x: Horizontal translation fraction.
        translate_y: Vertical translation fraction.
        scale: Scaling factor.
        shear_x: Shear angle in x-direction (degrees).
        shear_y: Shear angle in y-direction (degrees).
        height: Height of the image/data.
        width: Width of the image/data.
        center_x: x-coordinate of the transformation center (fraction).
        center_y: y-coordinate of the transformation center (fraction).
        bounding_box_format: The format of the input bounding boxes. Defaults
            to `"xyxy"`.

    Returns:
        The transformed bounding boxes, a tensor/array with the same shape
        as the input `boxes`.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.
    """
    if bounding_box_format != "xyxy":
        # Improvement: the bare `raise NotImplementedError` gave callers no
        # hint about what was unsupported.
        raise NotImplementedError(
            "`affine_transform` only supports the 'xyxy' bounding box "
            f"format. Received: bounding_box_format={bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to tensorflow backend if we are in tf.data pipe
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    boxes = box_utils.affine(
        boxes,
        angle,
        translate_x,
        translate_y,
        scale,
        shear_x,
        shear_y,
        height,
        width,
        center_x=center_x,
        center_y=center_y,
    )
    # Switch back to original backend
    box_utils.backend.reset()
    return boxes
@keras_export("keras.utils.bounding_boxes.crop")
def crop(boxes, top, left, height, width, bounding_box_format="xyxy"):
    """Crops bounding boxes based on the given offsets and dimensions.

    This function crops bounding boxes to a specified region defined by
    `top`, `left`, `height`, and `width`. The boxes are expected in
    `xyxy` format, cropped, and then returned.

    Args:
        boxes: The bounding boxes to crop. A NumPy array or tensor of shape
            `(N, 4)` or `(batch_size, N, 4)`.
        top: The vertical offset of the top-left corner of the cropping region.
        left: The horizontal offset of the top-left corner of the cropping
            region.
        height: The height of the cropping region. Defaults to `None`.
        width: The width of the cropping region. Defaults to `None`.
        bounding_box_format: The format of the input bounding boxes. Defaults
            to `"xyxy"`.

    Returns:
        The cropped bounding boxes.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.

    Example:
    ```python
    boxes = np.array([[10, 20, 50, 60], [70, 80, 100, 120]])  # xyxy format
    cropped_boxes = keras.utils.bounding_boxes.crop(
        boxes, bounding_box_format="xyxy", top=10, left=20, height=40, width=30
    )  # Cropping a 30x40 region starting at (20, 10)
    print(cropped_boxes)
    # Expected output:
    # array([[ 0., 10., 30., 50.],
    #       [50., 70., 80., 110.]])
    """
    if bounding_box_format != "xyxy":
        # Improvement: the bare `raise NotImplementedError` gave callers no
        # hint about what was unsupported.
        raise NotImplementedError(
            "`crop` only supports the 'xyxy' bounding box format. "
            f"Received: bounding_box_format={bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to tensorflow backend if we are in tf.data pipe
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    outputs = box_utils.crop(boxes, top, left, height, width)
    # Switch back to original backend
    box_utils.backend.reset()
    return outputs
@keras_export("keras.utils.bounding_boxes.pad")
def pad(boxes, top, left, height=None, width=None, bounding_box_format="xyxy"):
    """Pads bounding boxes by adding top and left offsets.

    This function adds padding to the bounding boxes by increasing the 'top'
    and 'left' coordinates by the specified amounts. The method assume the
    input bounding_box_format is `xyxy`.

    Args:
        boxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`.
        top: Vertical padding to add.
        left: Horizontal padding to add.
        height: Image height. Defaults to None.
        width: Image width. Defaults to None.
        bounding_box_format: The format of the input bounding boxes. Defaults
            to `"xyxy"`.

    Returns:
        Padded bounding boxes in the original format.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.
    """
    if bounding_box_format != "xyxy":
        # Improvement: the bare `raise NotImplementedError` gave callers no
        # hint about what was unsupported.
        raise NotImplementedError(
            "`pad` only supports the 'xyxy' bounding box format. "
            f"Received: bounding_box_format={bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to tensorflow backend if we are in tf.data pipe
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    outputs = box_utils.pad(boxes, top, left)
    # Switch back to original backend
    box_utils.backend.reset()
    return outputs
@keras_export("keras.utils.bounding_boxes.encode_box_to_deltas")
def encode_box_to_deltas(
    anchors,
    boxes,
    anchor_format,
    box_format,
    encoding_format="center_yxhw",
    variance=None,
    image_shape=None,
):
    """Encodes bounding boxes relative to anchors as deltas.

    This function calculates the deltas that represent the difference between
    bounding boxes and provided anchors. Deltas encode the offsets and scaling
    factors to apply to anchors to obtain the target boxes.

    Boxes and anchors are first converted to the specified `encoding_format`
    (defaulting to `center_yxhw`) for consistent delta representation.

    Args:
        anchors: `Tensors`. Anchor boxes with shape of `(N, 4)` where N is the
            number of anchors.
        boxes: `Tensors` Bounding boxes to encode. Boxes can be of shape
            `(B, N, 4)` or `(N, 4)`.
        anchor_format: str. The format of the input `anchors`
            (e.g., "xyxy", "xywh", etc.).
        box_format: str. The format of the input `boxes`
            (e.g., "xyxy", "xywh", etc.).
        encoding_format: str. The intermediate format to which boxes and anchors
            are converted before delta calculation. Defaults to "center_yxhw".
        variance: `List[float]`. A 4-element array/tensor representing variance
            factors to scale the box deltas. If provided, the calculated deltas
            are divided by the variance. Defaults to None.
        image_shape: `Tuple[int]`. The shape of the image (height, width, 3).
            When using relative bounding box format for `box_format` the
            `image_shape` is used for normalization.

    Returns:
        Encoded box deltas. The return type matches the `encode_format`.

    Raises:
        ValueError: If `variance` is not None and its length is not 4.
        ValueError: If `encoding_format` is not `"center_xywh"` or
            `"center_yxhw"`.
    """
    if variance is not None:
        variance = ops.convert_to_tensor(variance, "float32")
        var_len = variance.shape[-1]
        if var_len != 4:
            raise ValueError(f"`variance` must be length 4, got {variance}")
    if encoding_format not in ["center_xywh", "center_yxhw"]:
        raise ValueError(
            "`encoding_format` should be one of 'center_xywh' or "
            f"'center_yxhw', got {encoding_format}"
        )
    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape
    encoded_anchors = convert_format(
        anchors,
        source=anchor_format,
        target=encoding_format,
        height=height,
        width=width,
    )
    boxes = convert_format(
        boxes,
        source=box_format,
        target=encoding_format,
        height=height,
        width=width,
    )
    # Clamp sizes to epsilon so the division and log below stay finite for
    # degenerate (zero-size) anchors or boxes.
    anchor_dimensions = ops.maximum(encoded_anchors[..., 2:], backend.epsilon())
    box_dimensions = ops.maximum(boxes[..., 2:], backend.epsilon())
    # anchors be unbatched, boxes can either be batched or unbatched.
    boxes_delta = ops.concatenate(
        [
            # Center offsets, normalized by the anchor size.
            (boxes[..., :2] - encoded_anchors[..., :2]) / anchor_dimensions,
            # Log-scale size ratios.
            ops.log(box_dimensions / anchor_dimensions),
        ],
        axis=-1,
    )
    if variance is not None:
        boxes_delta /= variance
    return boxes_delta
@keras_export("keras.utils.bounding_boxes.decode_deltas_to_boxes")
def decode_deltas_to_boxes(
    anchors,
    boxes_delta,
    anchor_format,
    box_format,
    encoded_format="center_yxhw",
    variance=None,
    image_shape=None,
):
    """Converts bounding boxes from delta format to the specified `box_format`.

    This function decodes bounding box deltas relative to anchors to obtain the
    final bounding box coordinates. The boxes are encoded in a specific
    `encoded_format` (center_yxhw by default) during the decoding process.
    This allows flexibility in how the deltas are applied to the anchors.

    Args:
        anchors: Can be `Tensors` or `Dict[Tensors]` where keys are level
            indices and values are corresponding anchor boxes.
            The shape of the array/tensor should be `(N, 4)` where N is the
            number of anchors.
        boxes_delta: Can be `Tensors` or `Dict[Tensors]` Bounding box deltas
            must have the same type and structure as `anchors`. The
            shape of the array/tensor can be `(N, 4)` or `(B, N, 4)` where N is
            the number of boxes.
        anchor_format: str. The format of the input `anchors`.
            (e.g., `"xyxy"`, `"xywh"`, etc.)
        box_format: str. The desired format for the output boxes.
            (e.g., `"xyxy"`, `"xywh"`, etc.)
        encoded_format: str. Raw output format from regression head. Defaults
            to `"center_yxhw"`.
        variance: `List[floats]`. A 4-element array/tensor representing
            variance factors to scale the box deltas. If provided, the deltas
            are multiplied by the variance before being applied to the anchors.
            Defaults to None.
        image_shape: `Tuple[int]`. The shape of the image (height, width, 3).
            When using relative bounding box format for `box_format` the
            `image_shape` is used for normalization.

    Returns:
        Decoded box coordinates. The return type matches the `box_format`.

    Raises:
        ValueError: If `variance` is not None and its length is not 4.
        ValueError: If `encoded_format` is not `"center_xywh"` or
            `"center_yxhw"`.
    """
    if variance is not None:
        variance = ops.convert_to_tensor(variance, "float32")
        var_len = variance.shape[-1]
        if var_len != 4:
            raise ValueError(f"`variance` must be length 4, got {variance}")
    if encoded_format not in ["center_xywh", "center_yxhw"]:
        raise ValueError(
            f"`encoded_format` should be 'center_xywh' or 'center_yxhw', "
            f"but got '{encoded_format}'."
        )
    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape

    def decode_single_level(anchor, box_delta):
        # Inverse of `encode_box_to_deltas` for one feature level.
        encoded_anchor = convert_format(
            anchor,
            source=anchor_format,
            target=encoded_format,
            height=height,
            width=width,
        )
        if variance is not None:
            box_delta = box_delta * variance
        # anchors be unbatched, boxes can either be batched or unbatched.
        box = ops.concatenate(
            [
                # Center: anchor center plus anchor-size-scaled offset.
                box_delta[..., :2] * encoded_anchor[..., 2:]
                + encoded_anchor[..., :2],
                # Size: anchor size scaled by exp of the log-ratio delta.
                ops.exp(box_delta[..., 2:]) * encoded_anchor[..., 2:],
            ],
            axis=-1,
        )
        box = convert_format(
            box,
            source=encoded_format,
            target=box_format,
            height=height,
            width=width,
        )
        return box

    if isinstance(anchors, dict) and isinstance(boxes_delta, dict):
        # Multi-level (feature-pyramid) inputs: decode each level separately.
        boxes = {}
        for lvl, anchor in anchors.items():
            boxes[lvl] = decode_single_level(anchor, boxes_delta[lvl])
        return boxes
    else:
        return decode_single_level(anchors, boxes_delta)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou_test.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou_test.py | """Tests for iou functions."""
import numpy as np
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (
iou as iou_lib,
)
class IoUTest(testing.TestCase):
    """Tests for `compute_iou` across batched/unbatched input combinations."""

    def test_compute_single_iou(self):
        bb1 = np.array([[100, 101, 200, 201]])
        bb1_off_by_1 = np.array([[101, 102, 201, 202]])
        # area of bb1 and bb1_off_by_1 are each 10000.
        # intersection area is 99*99=9801
        # iou=9801/(2*10000 - 9801)=0.96097656633
        self.assertAllClose(
            iou_lib.compute_iou(bb1, bb1_off_by_1, "yxyx")[0], [0.96097656633]
        )

    def test_compute_iou(self):
        bb1 = [100, 101, 200, 201]
        bb1_off_by_1_pred = [101, 102, 201, 202]
        iou_bb1_bb1_off = 0.96097656633
        top_left_bounding_box = [0, 2, 1, 3]
        far_away_box = [1300, 1400, 1500, 1401]
        another_far_away_pred = [1000, 1400, 1200, 1401]
        # Rows represent predictions, columns ground truths
        expected_result = np.array(
            [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=np.float32,
        )
        sample_y_true = np.array([bb1, top_left_bounding_box, far_away_box])
        sample_y_pred = np.array(
            [bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
        )
        result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(expected_result, result)

    def test_batched_compute_iou(self):
        # Same fixture as above, duplicated along a batch axis of 2.
        bb1 = [100, 101, 200, 201]
        bb1_off_by_1_pred = [101, 102, 201, 202]
        iou_bb1_bb1_off = 0.96097656633
        top_left_bounding_box = [0, 2, 1, 3]
        far_away_box = [1300, 1400, 1500, 1401]
        another_far_away_pred = [1000, 1400, 1200, 1401]
        # Rows represent predictions, columns ground truths
        expected_result = np.array(
            [
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
            ],
        )
        sample_y_true = np.array(
            [
                [bb1, top_left_bounding_box, far_away_box],
                [bb1, top_left_bounding_box, far_away_box],
            ],
        )
        sample_y_pred = np.array(
            [
                [
                    bb1_off_by_1_pred,
                    top_left_bounding_box,
                    another_far_away_pred,
                ],
                [
                    bb1_off_by_1_pred,
                    top_left_bounding_box,
                    another_far_away_pred,
                ],
            ],
        )
        result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(expected_result, result)

    def test_batched_boxes1_unbatched_boxes2(self):
        # boxes1 batched (rank 3), boxes2 unbatched (rank 2): result should
        # broadcast boxes2 across the batch.
        bb1 = [100, 101, 200, 201]
        bb1_off_by_1_pred = [101, 102, 201, 202]
        iou_bb1_bb1_off = 0.96097656633
        top_left_bounding_box = [0, 2, 1, 3]
        far_away_box = [1300, 1400, 1500, 1401]
        another_far_away_pred = [1000, 1400, 1200, 1401]
        # Rows represent predictions, columns ground truths
        expected_result = np.array(
            [
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
            ],
        )
        sample_y_true = np.array(
            [
                [bb1, top_left_bounding_box, far_away_box],
                [bb1, top_left_bounding_box, far_away_box],
            ],
        )
        sample_y_pred = np.array(
            [bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
        )
        result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(expected_result, result)

    def test_unbatched_boxes1_batched_boxes2(self):
        # boxes1 unbatched (rank 2... here batch of 1), boxes2 batched:
        # result should broadcast boxes1 across boxes2's batch.
        bb1 = [100, 101, 200, 201]
        bb1_off_by_1_pred = [101, 102, 201, 202]
        iou_bb1_bb1_off = 0.96097656633
        top_left_bounding_box = [0, 2, 1, 3]
        far_away_box = [1300, 1400, 1500, 1401]
        another_far_away_pred = [1000, 1400, 1200, 1401]
        # Rows represent predictions, columns ground truths
        expected_result = np.array(
            [
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                [[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
            ],
        )
        sample_y_true = np.array(
            [
                [bb1, top_left_bounding_box, far_away_box],
            ],
        )
        sample_y_pred = np.array(
            [
                [
                    bb1_off_by_1_pred,
                    top_left_bounding_box,
                    another_far_away_pred,
                ],
                [
                    bb1_off_by_1_pred,
                    top_left_bounding_box,
                    another_far_away_pred,
                ],
            ],
        )
        result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(expected_result, result)
class CIoUTest(testing.TestCase):
    """Tests for `compute_ciou` across batched/unbatched input combinations."""

    def test_compute_single_ciou(self):
        bb1 = np.array([[100, 101, 200, 201]])
        bb2 = np.array([[101, 102, 201, 202]])
        self.assertAllClose(
            iou_lib.compute_ciou(bb1, bb2, "yxyx")[0], [0.96087853672]
        )

    def test_compute_ciou(self):
        bb1 = np.array([100, 101, 200, 201])
        bb2 = np.array([150, 150, 250, 250])
        ciou_bb1_bb2 = 0.036492417
        # non overlapping case
        far_away_bb1 = np.array([1000, 1000, 1500, 1500])
        far_away_bb2 = np.array([2000, 2000, 2500, 2500])
        ciou_far_away_bb1_bb2 = -0.44444444435
        sample_y_true = np.array([bb1, far_away_bb1])
        sample_y_pred = np.array([bb2, far_away_bb2])
        result = iou_lib.compute_ciou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(ciou_bb1_bb2, result[0])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[1])

    def test_batched_compute_ciou(self):
        # Same fixture duplicated along a batch axis of 2.
        bb1 = np.array([100, 101, 200, 201])
        bb2 = np.array([150, 150, 250, 250])
        ciou_bb1_bb2 = 0.036492417
        # non overlapping case
        far_away_bb1 = np.array([1000, 1000, 1500, 1500])
        far_away_bb2 = np.array([2000, 2000, 2500, 2500])
        ciou_far_away_bb1_bb2 = -0.44444444435
        sample_y_true = np.array([[bb1, far_away_bb1], [bb1, far_away_bb1]])
        sample_y_pred = np.array([[bb2, far_away_bb2], [bb2, far_away_bb2]])
        result = iou_lib.compute_ciou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(ciou_bb1_bb2, result[0][0])
        self.assertAllClose(ciou_bb1_bb2, result[1][0])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[0][1])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[1][1])

    def test_batched_boxes1_unbatched_boxes2(self):
        # Mixed ranks: boxes1 batched, boxes2 unbatched.
        bb1 = np.array([100, 101, 200, 201])
        bb2 = np.array([150, 150, 250, 250])
        ciou_bb1_bb2 = 0.036492417
        # non overlapping case
        far_away_bb1 = np.array([1000, 1000, 1500, 1500])
        far_away_bb2 = np.array([2000, 2000, 2500, 2500])
        ciou_far_away_bb1_bb2 = -0.44444444435
        sample_y_true = np.array([[bb1, far_away_bb1], [bb1, far_away_bb1]])
        sample_y_pred = np.array([bb2, far_away_bb2])
        result = iou_lib.compute_ciou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(ciou_bb1_bb2, result[0][0])
        self.assertAllClose(ciou_bb1_bb2, result[1][0])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[0][1])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[1][1])

    def test_unbatched_boxes1_batched_boxes2(self):
        # Mixed ranks: boxes1 unbatched, boxes2 batched.
        bb1 = np.array([100, 101, 200, 201])
        bb2 = np.array([150, 150, 250, 250])
        ciou_bb1_bb2 = 0.036492417
        # non overlapping case
        far_away_bb1 = np.array([1000, 1000, 1500, 1500])
        far_away_bb2 = np.array([2000, 2000, 2500, 2500])
        ciou_far_away_bb1_bb2 = -0.44444444435
        sample_y_true = np.array([bb1, far_away_bb1])
        sample_y_pred = np.array([[bb2, far_away_bb2], [bb2, far_away_bb2]])
        result = iou_lib.compute_ciou(sample_y_true, sample_y_pred, "yxyx")
        self.assertAllClose(ciou_bb1_bb2, result[0][0])
        self.assertAllClose(ciou_bb1_bb2, result[1][0])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[0][1])
        self.assertAllClose(ciou_far_away_bb1_bb2, result[1][1])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py | from keras.src import backend as current_backend
from keras.src.utils import tf_utils
def _classes_shape(batched, classes_shape, max_boxes):
if max_boxes is None:
return None
if batched:
return [None, max_boxes] + classes_shape[2:]
return [max_boxes] + classes_shape[1:]
def _box_shape(batched, boxes_shape, max_boxes):
# ensure we dont drop the final axis in RaggedTensor mode
if max_boxes is None:
shape = list(boxes_shape)
shape[-1] = 4
return shape
if batched:
return [None, max_boxes, 4]
return [max_boxes, 4]
def _pad_or_truncate_sample(
    boxes, labels, max_boxes, boxes_default_value, labels_default_value
):
    """Pad (or truncate) one sample's box/label lists to `max_boxes` entries."""
    if len(boxes) >= max_boxes:
        return boxes[:max_boxes], labels[:max_boxes]
    num_boxes_to_add = max_boxes - len(boxes)
    box_padding = [[boxes_default_value] * 4 for _ in range(num_boxes_to_add)]
    label_padding = [labels_default_value] * num_boxes_to_add
    return boxes + box_padding, labels + label_padding


def densify_bounding_boxes(
    bounding_boxes,
    is_batched=False,
    max_boxes=None,
    boxes_default_value=0,
    labels_default_value=-1,
    backend=None,
):
    """Convert `bounding_boxes` to dense tensors with a fixed box count.

    Accepts plain Python lists, `tf.RaggedTensor`s, or dense tensors under
    the "boxes"/"labels" keys. Lists are padded (or truncated) to
    `max_boxes` boxes per sample using `boxes_default_value` /
    `labels_default_value`; ragged tensors are densified via `to_tensor`.
    When `max_boxes` is `None`, list inputs are padded to the longest
    sample in the batch (previously this branch overwrote a caller-supplied
    `max_boxes` and crashed when it was `None`).

    Args:
        bounding_boxes: dict with "boxes" and "labels" keys.
        is_batched: whether the inputs carry a leading batch dimension.
        max_boxes: target number of boxes per sample, or `None`.
        boxes_default_value: fill value for padded box coordinates.
        labels_default_value: fill value for padded labels.
        backend: backend module used for tensor conversion; defaults to the
            current Keras backend.

    Returns:
        The `bounding_boxes` dict with dense "boxes" (float32) and
        "labels" tensors.
    """
    validate_bounding_boxes(bounding_boxes)
    boxes = bounding_boxes["boxes"]
    labels = bounding_boxes["labels"]
    backend = backend or current_backend
    if isinstance(boxes, list):
        if boxes and isinstance(boxes[0], list):
            if boxes[0] and isinstance(boxes[0][0], list):
                # Batched case: `boxes` is a list of per-sample box lists.
                if not isinstance(labels[0][0], int):
                    raise ValueError(
                        "If providing `bounding_boxes['labels']` as a list, "
                        "it should contain integers labels. Received: "
                        f"bounding_boxes['labels']={labels}"
                    )
                if max_boxes is None:
                    # Default: pad every sample to the longest sample.
                    max_boxes = max(len(b) for b in boxes)
                new_boxes = []
                new_labels = []
                for sample_boxes, sample_labels in zip(boxes, labels):
                    padded_boxes, padded_labels = _pad_or_truncate_sample(
                        sample_boxes,
                        sample_labels,
                        max_boxes,
                        boxes_default_value,
                        labels_default_value,
                    )
                    new_boxes.append(padded_boxes)
                    new_labels.append(padded_labels)
            else:
                # Unbatched case: `boxes` is a single list of boxes.
                # (Previously referenced undefined loop variables `b`/`l`.)
                if max_boxes is None:
                    max_boxes = len(boxes)
                new_boxes, new_labels = _pad_or_truncate_sample(
                    boxes,
                    labels,
                    max_boxes,
                    boxes_default_value,
                    labels_default_value,
                )
            return {
                "boxes": backend.convert_to_tensor(new_boxes, dtype="float32"),
                "labels": backend.convert_to_tensor(new_labels, dtype="int32"),
            }
    if tf_utils.is_ragged_tensor(boxes):
        # Densify ragged tensors, padding with the default values.
        bounding_boxes["boxes"] = bounding_boxes["boxes"].to_tensor(
            default_value=boxes_default_value,
            shape=_box_shape(
                is_batched, bounding_boxes["boxes"].shape, max_boxes
            ),
        )
        bounding_boxes["labels"] = bounding_boxes["labels"].to_tensor(
            default_value=labels_default_value,
            shape=_classes_shape(
                is_batched, bounding_boxes["labels"].shape, max_boxes
            ),
        )
        return bounding_boxes
    # Already dense (or array-like): just make sure they are tensors.
    bounding_boxes["boxes"] = backend.convert_to_tensor(boxes, dtype="float32")
    bounding_boxes["labels"] = backend.convert_to_tensor(labels)
    return bounding_boxes
def validate_bounding_boxes(bounding_boxes):
    """Validate the structure of a `bounding_boxes` dict.

    Checks that `bounding_boxes` is a dict with "boxes" and "labels" keys
    and that the two entries have compatible container types (list vs.
    list, ragged vs. ragged) or compatible dense ranks. Raises
    `ValueError` on any structural mismatch; returns `None` otherwise.
    """
    if (
        not isinstance(bounding_boxes, dict)
        or "labels" not in bounding_boxes
        or "boxes" not in bounding_boxes
    ):
        raise ValueError(
            "Expected `bounding_boxes` argument to be a "
            "dict with keys 'boxes' and 'labels'. Received: "
            f"bounding_boxes={bounding_boxes}"
        )
    boxes = bounding_boxes["boxes"]
    labels = bounding_boxes["labels"]
    if isinstance(boxes, list):
        if not isinstance(labels, list):
            raise ValueError(
                "If `bounding_boxes['boxes']` is a list, then "
                "`bounding_boxes['labels']` must also be a list. "
                f"Received: bounding_boxes['labels']={labels}"
            )
        if len(boxes) != len(labels):
            raise ValueError(
                "If `bounding_boxes['boxes']` and "
                "`bounding_boxes['labels']` are both lists, "
                "they must have the same length. Received: "
                f"len(bounding_boxes['boxes'])={len(boxes)} and "
                f"len(bounding_boxes['labels'])={len(labels)}"
            )
    elif tf_utils.is_ragged_tensor(boxes):
        if not tf_utils.is_ragged_tensor(labels):
            raise ValueError(
                "If `bounding_boxes['boxes']` is a Ragged tensor, "
                "`bounding_boxes['labels']` must also be a "
                "Ragged tensor. "
                f"Received: bounding_boxes['labels']={labels}"
            )
    else:
        # Dense tensors: check rank compatibility between boxes and labels.
        boxes_shape = current_backend.shape(boxes)
        labels_shape = current_backend.shape(labels)
        if len(boxes_shape) == 2:  # (num_boxes, 4)
            if len(labels_shape) not in {1, 2}:
                raise ValueError(
                    "Found "
                    f"bounding_boxes['boxes'].shape={boxes_shape} "
                    "and expected bounding_boxes['labels'] to have "
                    "rank 1 or 2, but received: "
                    f"bounding_boxes['labels'].shape={labels_shape} "
                )
        elif len(boxes_shape) == 3:  # (batch_size, num_boxes, 4)
            if len(labels_shape) not in {2, 3}:
                raise ValueError(
                    "Found "
                    f"bounding_boxes['boxes'].shape={boxes_shape} "
                    "and expected bounding_boxes['labels'] to have "
                    "rank 2 or 3, but received: "
                    f"bounding_boxes['labels'].shape={labels_shape} "
                )
        else:
            raise ValueError(
                "Expected `bounding_boxes['boxes']` "
                "to have rank 2 or 3, with shape "
                "(num_boxes, 4) or (batch_size, num_boxes, 4). "
                "Received: "
                f"bounding_boxes['boxes'].shape={boxes_shape}"
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py | import math
from keras.src.utils import backend_utils
# Bounding-box format identifiers accepted by `BoundingBox.convert_format`.
# Formats prefixed with "rel_" hold coordinates normalized by the image
# width/height; the others hold absolute pixel coordinates.
SUPPORTED_FORMATS = (
    "xyxy",
    "yxyx",
    "xywh",
    "center_xywh",
    "center_yxhw",
    "rel_xyxy",
    "rel_yxyx",
    "rel_xywh",
    "rel_center_xywh",
)
class BoundingBox:
    """Backend-agnostic bounding-box utilities.

    Converts boxes between the formats listed in `SUPPORTED_FORMATS`,
    clips them to the image bounds, and applies geometric transformations
    (affine, crop, pad). All tensor math goes through `self.backend`, a
    `backend_utils.DynamicBackend`, so the same code runs under any Keras
    backend.
    """

    def __init__(self):
        self.backend = backend_utils.DynamicBackend()

    def convert_format(
        self,
        boxes,
        source,
        target,
        height=None,
        width=None,
        dtype="float32",
    ):
        """Convert `boxes` from the `source` format to the `target` format.

        `boxes` may also be a dict with a "boxes" entry, in which case that
        entry is converted in place. `height` and `width` are required
        whenever exactly one of `source`/`target` is a relative format.
        """
        if isinstance(boxes, dict):
            boxes["boxes"] = self.convert_format(
                boxes["boxes"],
                source=source,
                target=target,
                height=height,
                width=width,
                dtype=dtype,
            )
            return boxes

        # Every conversion is routed through the canonical "xyxy" format.
        to_xyxy_converters = {
            "xyxy": self._xyxy_to_xyxy,
            "yxyx": self._yxyx_to_xyxy,
            "xywh": self._xywh_to_xyxy,
            "center_xywh": self._center_xywh_to_xyxy,
            "center_yxhw": self._center_yxhw_to_xyxy,
            "rel_xyxy": self._rel_xyxy_to_xyxy,
            "rel_yxyx": self._rel_yxyx_to_xyxy,
            "rel_xywh": self._rel_xywh_to_xyxy,
            "rel_center_xywh": self._rel_center_xywh_to_xyxy,
        }
        from_xyxy_converters = {
            "xyxy": self._xyxy_to_xyxy,
            "yxyx": self._xyxy_to_yxyx,
            "xywh": self._xyxy_to_xywh,
            "center_xywh": self._xyxy_to_center_xywh,
            "center_yxhw": self._xyxy_to_center_yxhw,
            "rel_xyxy": self._xyxy_to_rel_xyxy,
            "rel_yxyx": self._xyxy_to_rel_yxyx,
            "rel_xywh": self._xyxy_to_rel_xywh,
            "rel_center_xywh": self._xyxy_to_rel_center_xywh,
        }

        ops = self.backend
        boxes_shape = ops.shape(boxes)
        if boxes_shape[-1] != 4:
            raise ValueError(
                "`boxes` must be a tensor with the last dimension of 4. "
                f"Received: boxes.shape={boxes_shape}"
            )
        source = source.lower()
        target = target.lower()
        if source not in SUPPORTED_FORMATS or target not in SUPPORTED_FORMATS:
            raise ValueError(
                f"Invalid source or target format. "
                f"Supported formats: {SUPPORTED_FORMATS}"
            )
        if (source.startswith("rel_") or target.startswith("rel_")) and (
            width is None or height is None
        ):
            raise ValueError(
                "convert_format() must receive `height` and `width` "
                "transforming between relative and absolute formats."
                f"convert_format() received source=`{source}`, "
                f"target=`{target}`, "
                f"but height={height} and width={width}."
            )
        boxes = ops.cast(boxes, dtype)
        if source == target:
            return boxes

        if width is not None:
            width = ops.cast(width, dtype)
        if height is not None:
            height = ops.cast(height, dtype)

        if source.startswith("rel_") and target.startswith("rel_"):
            # rel -> rel conversions never use the image size, so they can
            # be handled as the corresponding absolute-format conversion.
            source = source.replace("rel_", "", 1)
            target = target.replace("rel_", "", 1)
        to_xyxy_converter = to_xyxy_converters[source]
        from_xyxy_converter = from_xyxy_converters[target]
        in_xyxy_boxes = to_xyxy_converter(boxes, height, width)
        return from_xyxy_converter(in_xyxy_boxes, height, width)

    def clip_to_image_size(
        self,
        bounding_boxes,
        height=None,
        width=None,
        bounding_box_format="xyxy",
    ):
        """Clip boxes to lie inside the image and invalidate empty boxes.

        Boxes whose clipped area collapses to zero get their label set to
        -1. Only the "xyxy" (absolute) and "rel_xyxy" (normalized) formats
        are supported.
        """
        if bounding_box_format not in ("xyxy", "rel_xyxy"):
            raise NotImplementedError
        if bounding_box_format == "xyxy" and (height is None or width is None):
            raise ValueError(
                "`height` and `width` must be set if `format='xyxy'`."
            )
        ops = self.backend
        boxes, labels = bounding_boxes["boxes"], bounding_boxes["labels"]
        if width is not None:
            width = ops.cast(width, boxes.dtype)
        if height is not None:
            height = ops.cast(height, boxes.dtype)
        if bounding_box_format == "xyxy":
            x_max, y_max = width, height
        else:  # "rel_xyxy": coordinates are normalized to [0, 1].
            x_max, y_max = 1.0, 1.0
        x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1)
        x1 = ops.numpy.clip(x1, 0, x_max)
        y1 = ops.numpy.clip(y1, 0, y_max)
        x2 = ops.numpy.clip(x2, 0, x_max)
        y2 = ops.numpy.clip(y2, 0, y_max)
        boxes = ops.numpy.concatenate([x1, y1, x2, y2], axis=-1)
        # Mark boxes that were clipped away entirely with label -1.
        areas = ops.numpy.squeeze(self._compute_area(boxes), axis=-1)
        labels = ops.numpy.where(areas > 0, labels, -1)
        result = bounding_boxes.copy()
        result["boxes"] = boxes
        result["labels"] = labels
        return result

    def affine(
        self,
        boxes,
        angle,
        translate_x,
        translate_y,
        scale,
        shear_x,
        shear_y,
        height,
        width,
        center_x=None,
        center_y=None,
    ):
        """Apply a batched inverse affine transform to "xyxy" boxes.

        Each box is expanded to its four corners, the corners are
        transformed, and the output box is the axis-aligned hull of the
        transformed corners. The transform parameters are per-sample
        tensors of shape `[batch]`; `center_x`/`center_y` default to the
        image center (0.5 in relative coordinates).
        """
        ops = self.backend
        boxes_shape = ops.shape(boxes)
        batch_size = boxes_shape[0]
        n_boxes = boxes_shape[1]
        if center_x is None:
            center_x = 0.5
        if center_y is None:
            center_y = 0.5
        matrix = self._compute_inverse_affine_matrix(
            center_x,
            center_y,
            angle,
            translate_x,
            translate_y,
            scale,
            shear_x,
            shear_y,
            height,
            width,
        )
        boxes = ops.cast(boxes, dtype=matrix.dtype)
        transposed_matrix = ops.numpy.transpose(matrix[:, :2, :], [0, 2, 1])
        # Expand each (x1, y1, x2, y2) box into its four corner points.
        points = boxes  # [B, N, 4]
        points = ops.numpy.stack(
            [
                points[..., 0],
                points[..., 1],
                points[..., 2],
                points[..., 1],
                points[..., 2],
                points[..., 3],
                points[..., 0],
                points[..., 3],
            ],
            axis=-1,
        )
        points = ops.numpy.reshape(points, [batch_size, n_boxes, 4, 2])
        # Homogeneous coordinates: append a constant 1 to each point.
        points = ops.numpy.concatenate(
            [
                points,
                ops.numpy.ones([batch_size, n_boxes, 4, 1], points.dtype),
            ],
            axis=-1,
        )
        transformed_points = ops.numpy.einsum(
            "bnxy,byz->bnxz", points, transposed_matrix
        )
        # Axis-aligned bounding box of the transformed corners.
        boxes_min = ops.numpy.amin(transformed_points, axis=2)
        boxes_max = ops.numpy.amax(transformed_points, axis=2)
        outputs = ops.numpy.concatenate([boxes_min, boxes_max], axis=-1)
        return outputs

    def crop(self, boxes, top, left, height, width):
        """Shift "xyxy" boxes into the crop's frame and clip to its extent."""
        ops = self.backend
        x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1)
        x1 = ops.numpy.clip(x1 - left, 0, width)
        y1 = ops.numpy.clip(y1 - top, 0, height)
        x2 = ops.numpy.clip(x2 - left, 0, width)
        y2 = ops.numpy.clip(y2 - top, 0, height)
        return ops.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def pad(self, boxes, top, left):
        """Shift "xyxy" boxes to account for padding added above and left."""
        ops = self.backend
        x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1)
        return ops.numpy.concatenate(
            [x1 + left, y1 + top, x2 + left, y2 + top], axis=-1
        )

    # Converters: <format> -> "xyxy". Relative formats take the image size.

    def _xyxy_to_xyxy(self, boxes, height=None, width=None):
        return boxes

    def _yxyx_to_xyxy(self, boxes, height=None, width=None):
        y1, x1, y2, x2 = self.backend.numpy.split(boxes, 4, axis=-1)
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _xywh_to_xyxy(self, boxes, height=None, width=None):
        x1, y1, w, h = self.backend.numpy.split(boxes, 4, axis=-1)
        x2 = x1 + w
        y2 = y1 + h
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _center_xywh_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        cx, cy, w, h = ops.numpy.split(boxes, 4, axis=-1)
        half_w = w / 2.0
        half_h = h / 2.0
        x1 = cx - half_w
        y1 = cy - half_h
        x2 = cx + half_w
        y2 = cy + half_h
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _center_yxhw_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        cy, cx, h, w = ops.numpy.split(boxes, 4, axis=-1)
        half_w = w / 2.0
        half_h = h / 2.0
        x1 = cx - half_w
        y1 = cy - half_h
        x2 = cx + half_w
        y2 = cy + half_h
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _rel_xyxy_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        rel_x1, rel_y1, rel_x2, rel_y2 = ops.numpy.split(boxes, 4, axis=-1)
        x1 = rel_x1 * width
        y1 = rel_y1 * height
        x2 = rel_x2 * width
        y2 = rel_y2 * height
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _rel_yxyx_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        rel_y1, rel_x1, rel_y2, rel_x2 = ops.numpy.split(boxes, 4, axis=-1)
        x1 = rel_x1 * width
        y1 = rel_y1 * height
        x2 = rel_x2 * width
        y2 = rel_y2 * height
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _rel_xywh_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        rel_x1, rel_y1, rel_w, rel_h = ops.numpy.split(boxes, 4, axis=-1)
        x1 = rel_x1 * width
        y1 = rel_y1 * height
        x2 = (rel_x1 + rel_w) * width
        y2 = (rel_y1 + rel_h) * height
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    def _rel_center_xywh_to_xyxy(self, boxes, height=None, width=None):
        ops = self.backend
        rel_cx, rel_cy, rel_w, rel_h = ops.numpy.split(boxes, 4, axis=-1)
        half_rel_w = rel_w / 2.0
        half_rel_h = rel_h / 2.0
        # x coordinates scale by image width and y by height, matching
        # `_xyxy_to_rel_center_xywh` (the factors were previously swapped,
        # which broke round-trips on non-square images).
        x1 = (rel_cx - half_rel_w) * width
        y1 = (rel_cy - half_rel_h) * height
        x2 = (rel_cx + half_rel_w) * width
        y2 = (rel_cy + half_rel_h) * height
        return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1)

    # Converters: "xyxy" -> <format>.

    def _xyxy_to_yxyx(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        return self.backend.numpy.concatenate([y1, x1, y2, x2], axis=-1)

    def _xyxy_to_xywh(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        w = x2 - x1
        h = y2 - y1
        return self.backend.numpy.concatenate([x1, y1, w, h], axis=-1)

    def _xyxy_to_center_xywh(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        cx = x1 + ((x2 - x1) / 2.0)
        cy = y1 + ((y2 - y1) / 2.0)
        w = x2 - x1
        h = y2 - y1
        return self.backend.numpy.concatenate([cx, cy, w, h], axis=-1)

    def _xyxy_to_center_yxhw(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        cx = x1 + ((x2 - x1) / 2.0)
        cy = y1 + ((y2 - y1) / 2.0)
        w = x2 - x1
        h = y2 - y1
        return self.backend.numpy.concatenate([cy, cx, h, w], axis=-1)

    def _xyxy_to_rel_xyxy(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        rel_x1 = self.backend.numpy.divide(x1, width)
        rel_y1 = self.backend.numpy.divide(y1, height)
        rel_x2 = self.backend.numpy.divide(x2, width)
        rel_y2 = self.backend.numpy.divide(y2, height)
        return self.backend.numpy.concatenate(
            [rel_x1, rel_y1, rel_x2, rel_y2], axis=-1
        )

    def _xyxy_to_rel_yxyx(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        rel_x1 = self.backend.numpy.divide(x1, width)
        rel_y1 = self.backend.numpy.divide(y1, height)
        rel_x2 = self.backend.numpy.divide(x2, width)
        rel_y2 = self.backend.numpy.divide(y2, height)
        return self.backend.numpy.concatenate(
            [rel_y1, rel_x1, rel_y2, rel_x2], axis=-1
        )

    def _xyxy_to_rel_xywh(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        rel_x1 = x1 / width
        rel_y1 = y1 / height
        rel_w = (x2 - x1) / width
        rel_h = (y2 - y1) / height
        return self.backend.numpy.concatenate(
            [rel_x1, rel_y1, rel_w, rel_h], axis=-1
        )

    def _xyxy_to_rel_center_xywh(self, boxes, height=None, width=None):
        x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1)
        rel_cx = (x1 + ((x2 - x1) / 2.0)) / width
        rel_cy = (y1 + ((y2 - y1) / 2.0)) / height
        rel_w = (x2 - x1) / width
        rel_h = (y2 - y1) / height
        return self.backend.numpy.concatenate(
            [rel_cx, rel_cy, rel_w, rel_h], axis=-1
        )

    # Clip helpers

    def _compute_area(self, boxes, format="xyxy"):
        """Area of each box; shape matches a single split coordinate."""
        if format not in ("xyxy", "rel_xyxy"):
            raise NotImplementedError
        ops = self.backend
        x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1)
        widths = x2 - x1
        heights = y2 - y1
        return widths * heights

    def _compute_inverse_affine_matrix(
        self,
        center_x,
        center_y,
        angle,
        translate_x,
        translate_y,
        scale,
        shear_x,
        shear_y,
        height,
        width,
    ):
        """Build the per-sample 3x3 inverse affine matrix.

        Angles and shears are given in degrees; translations and centers
        are relative to the image size.
        """
        # Ref: TF._geometry._get_inverse_affine_matrix
        ops = self.backend
        batch_size = ops.shape(angle)[0]
        dtype = angle.dtype

        # The inverse transform negates the rotation and shears.
        angle = -angle
        shear_x = -shear_x
        shear_y = -shear_y

        cx = ops.numpy.multiply(center_x, (width - 1))
        cy = ops.numpy.multiply(center_y, (height - 1))
        rot = ops.numpy.multiply(angle, 1.0 / 180.0 * math.pi)
        tx = ops.numpy.multiply(-translate_x, (width - 1))
        ty = ops.numpy.multiply(-translate_y, (height - 1))
        sx = ops.numpy.multiply(shear_x, 1.0 / 180.0 * math.pi)
        sy = ops.numpy.multiply(shear_y, 1.0 / 180.0 * math.pi)

        # Cached results
        cos_sy = ops.numpy.cos(sy)
        tan_sx = ops.numpy.tan(sx)
        rot_minus_sy = rot - sy
        cx_plus_tx = cx + tx
        cy_plus_ty = cy + ty

        # Rotate Scale Shear (RSS) without scaling
        a = ops.numpy.cos(rot_minus_sy) / cos_sy
        b = a * tan_sx + ops.numpy.sin(rot)
        c = -ops.numpy.sin(rot_minus_sy) / cos_sy
        d = ops.numpy.cos(rot) - c * tan_sx

        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        a0 = ops.numpy.multiply(d, scale)
        a1 = ops.numpy.multiply(-b, scale)
        b0 = ops.numpy.multiply(-c, scale)
        b1 = ops.numpy.multiply(a, scale)
        a2 = cx - a0 * cx_plus_tx - a1 * cy_plus_ty
        b2 = cy - b0 * cx_plus_tx - b1 * cy_plus_ty

        # Shape of matrix: [[batch_size], ...] -> [batch_size, 6]
        matrix = ops.numpy.stack(
            [
                a0,
                a1,
                a2,
                b0,
                b1,
                b2,
                ops.numpy.zeros([batch_size], dtype),
                ops.numpy.zeros([batch_size], dtype),
                ops.numpy.ones([batch_size], dtype),
            ],
            axis=-1,
        )
        matrix = ops.numpy.reshape(matrix, [batch_size, 3, 3])
        return matrix
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters_test.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters_test.py | import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
class ConvertersTest(testing.TestCase):
    def setUp(self):
        # One batch of two boxes, expressed in every supported format.
        self.images = np.ones([2, 1000, 1000, 3], dtype="float32")
        self.height = 1000
        self.width = 1000
        self.boxes = {
            "xyxy": np.array(
                [[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32"
            ),
            "center_xywh": np.array(
                [[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
            ),
            "rel_xywh": np.array(
                [[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]],
                dtype="float32",
            ),
            "xywh": np.array(
                [[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32"
            ),
            "rel_xyxy": np.array(
                [[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]],
                dtype="float32",
            ),
            "yxyx": np.array(
                [[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32"
            ),
            "rel_yxyx": np.array(
                [[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]],
                dtype="float32",
            ),
            "center_yxhw": np.array(
                [[[70, 60, 100, 100], [80, 70, 100, 100]]], dtype="float32"
            ),
        }

    @parameterized.named_parameters(
        *[
            (f"{source}_{target}", source, target)
            for (source, target) in itertools.permutations(
                [
                    "xyxy",
                    "yxyx",
                    "xywh",
                    "rel_xyxy",
                    "rel_yxyx",
                    "center_xywh",
                    "center_yxhw",
                ],
                2,
            )
        ]
        + [("xyxy_xyxy", "xyxy", "xyxy")]
    )
    def test_convert_all_formats(self, source, target):
        converted = convert_format(
            self.boxes[source],
            source=source,
            target=target,
            height=self.height,
            width=self.width,
        )
        self.assertAllClose(converted, self.boxes[target])

    def test_convert_format_invalid_source(self):
        with self.assertRaises(ValueError):
            convert_format(self.boxes["xywh"], source="invalid", target="xywh")

    def test_convert_format_invalid_target(self):
        with self.assertRaises(ValueError):
            convert_format(self.boxes["xyxy"], source="xyxy", target="invalid")

    def test_convert_format_missing_dimensions(self):
        # Absolute <-> relative conversions require the image size.
        with self.assertRaisesRegex(
            ValueError, r"must receive `height` and `width`"
        ):
            convert_format(self.boxes["xyxy"], source="xyxy", target="rel_xyxy")

    def test_clip_to_image_size(self):
        bounding_boxes = {
            "boxes": np.array([[0.0, 0.0, 1.5, 1.6], [0.5, 0.4, 0.7, 0.8]]),
            "labels": np.array([0, 1]),
        }
        clipped = clip_to_image_size(
            bounding_boxes, bounding_box_format="rel_xyxy"
        )
        self.assertAllEqual(
            clipped,
            {
                "boxes": np.array(
                    [[0.0, 0.0, 1.0, 1.0], [0.5, 0.4, 0.7, 0.8]]
                ),
                "labels": np.array([0, 1]),
            },
        )

    def test_affine_identity(self):
        # An affine transform with no rotation/translation/shear and unit
        # scale must leave the boxes untouched.
        boxes = self.boxes["xyxy"]
        batch_size = boxes.shape[0]
        zeros = np.zeros([batch_size], dtype="float32")
        transformed = affine_transform(
            boxes=boxes,
            angle=zeros,
            translate_x=zeros,
            translate_y=zeros,
            scale=np.ones([batch_size], dtype="float32"),
            shear_x=zeros,
            shear_y=zeros,
            height=self.height,
            width=self.width,
        )
        self.assertAllClose(boxes, ops.convert_to_numpy(transformed))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation_test.py | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation_test.py | import pytest
import tensorflow as tf
from keras.src import backend
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (
validation,
)
from keras.src.testing import test_case
@pytest.mark.skipif(
    backend.backend() != "tensorflow",
    reason="The test targets TensorFlow-specific ragged tensors.",
)
class DensifyBoundingBoxesTest(test_case.TestCase):
    def test_densify_ragged_bounding_boxes_batched(self):
        # Two samples with 2 and 1 boxes respectively; densified output
        # must be padded out to `max_boxes` with zeros and -1 labels.
        boxes = tf.ragged.constant(
            [
                [[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
                [[0.5, 0.5, 0.6, 0.6]],
            ],
            dtype=tf.float32,
        )
        labels = tf.ragged.constant([[0, 1], [2]], dtype=tf.int32)
        max_boxes = 3
        densified = validation.densify_bounding_boxes(
            {"boxes": boxes, "labels": labels},
            is_batched=True,
            max_boxes=max_boxes,
        )
        self.assertEqual(densified["boxes"].shape, (2, max_boxes, 4))
        self.assertEqual(densified["labels"].shape, (2, max_boxes))
        self.assertAllClose(
            densified["boxes"],
            [
                [
                    [0.1, 0.1, 0.2, 0.2],
                    [0.3, 0.3, 0.4, 0.4],
                    [0.0, 0.0, 0.0, 0.0],
                ],
                [
                    [0.5, 0.5, 0.6, 0.6],
                    [0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0],
                ],
            ],
        )
        self.assertAllEqual(densified["labels"], [[0, 1, -1], [2, -1, -1]])

    def test_densify_ragged_bounding_boxes_unbatched(self):
        boxes = tf.ragged.constant(
            [[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
            dtype=tf.float32,
        )
        labels = tf.ragged.constant([[0], [1]], dtype=tf.int32)
        max_boxes = 4
        densified = validation.densify_bounding_boxes(
            {"boxes": boxes, "labels": labels},
            is_batched=False,
            max_boxes=max_boxes,
        )
        self.assertEqual(densified["boxes"].shape, (max_boxes, 4))
        self.assertEqual(densified["labels"].shape, (max_boxes, 1))
        self.assertAllClose(
            densified["boxes"],
            [
                [0.1, 0.1, 0.2, 0.2],
                [0.3, 0.3, 0.4, 0.4],
                [0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0],
            ],
        )
        self.assertAllEqual(densified["labels"], [[0], [1], [-1], [-1]])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/activation.py | keras/src/layers/activations/activation.py | from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
self._build_at_init()
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/relu.py | keras/src/layers/activations/relu.py | from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ReLU")
class ReLU(Layer):
"""Rectified Linear Unit activation function layer.
Formula:
``` python
f(x) = max(x,0)
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Example:
``` python
relu_layer = keras.layers.ReLU(
max_value=10,
negative_slope=0.5,
threshold=0,
)
input = np.array([-10, -5, 0.0, 5, 10])
result = relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
max_value: Float >= 0. Maximum activation value. None means unlimited.
Defaults to `None`.
negative_slope: Float >= 0. Negative slope coefficient.
Defaults to `0.0`.
threshold: Float >= 0. Threshold value for thresholded activation.
Defaults to `0.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs
):
super().__init__(**kwargs)
if max_value is not None and max_value < 0.0:
raise ValueError(
"max_value of a ReLU layer cannot be a negative "
f"value. Received: max_value={max_value}"
)
if negative_slope is None or negative_slope < 0.0:
raise ValueError(
"negative_slope of a ReLU layer cannot be a negative "
f"value. Received: negative_slope={negative_slope}"
)
if threshold is None or threshold < 0.0:
raise ValueError(
"threshold of a ReLU layer cannot be a negative "
f"value. Received: threshold={threshold}"
)
self.max_value = max_value
self.negative_slope = negative_slope
self.threshold = threshold
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return activations.relu(
inputs,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_value": self.max_value,
"negative_slope": self.negative_slope,
"threshold": self.threshold,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/relu_test.py | keras/src/layers/activations/relu_test.py | import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import relu
class ReLUTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_relu(self):
        self.run_layer_test(
            relu.ReLU,
            init_kwargs={
                "max_value": 10,
                "negative_slope": 1,
                "threshold": 0.5,
            },
            input_shape=(2, 3, 4),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def _check_outputs(self, init_kwargs, inputs, expected):
        # Build a layer from `init_kwargs` and check its output values.
        layer = relu.ReLU(**init_kwargs)
        self.assertAllClose(layer(np.array(inputs)), np.array(expected))

    def test_normal_relu_correctness(self):
        self._check_outputs(
            {"max_value": 10, "negative_slope": 0.0, "threshold": 0},
            [-10, -5, 0.0, 5, 10],
            [0.0, 0.0, 0.0, 5.0, 10.0],
        )

    def test_leaky_relu_correctness(self):
        self._check_outputs(
            {"max_value": 10, "negative_slope": 0.5, "threshold": 0},
            [-10, -5, 0.0, 5, 10],
            [-5.0, -2.5, 0.0, 5.0, 10.0],
        )

    def test_threshold_relu_correctness(self):
        self._check_outputs(
            {"max_value": 8, "negative_slope": 0.0, "threshold": 5},
            [6.0, 7.0, 0.0, 5, 10],
            [6.0, 7.0, 0.0, 0.0, 8.0],
        )

    def test_invalid_usage(self):
        bad_cases = [
            (
                "max_value of a ReLU layer cannot be a negative value",
                {"max_value": -10, "negative_slope": 1, "threshold": 0.5},
            ),
            (
                "negative_slope of a ReLU layer cannot be a negative value",
                {"max_value": 10, "negative_slope": -10, "threshold": 0.5},
            ),
            (
                "threshold of a ReLU layer cannot be a negative value",
                {"max_value": 10, "negative_slope": 1, "threshold": -10},
            ),
        ]
        for message, init_kwargs in bad_cases:
            with self.assertRaisesRegex(ValueError, message):
                self.run_layer_test(
                    relu.ReLU,
                    init_kwargs=init_kwargs,
                    input_shape=(2, 3, 4),
                    supports_masking=True,
                )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/elu.py | keras/src/layers/activations/elu.py | from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ELU")
class ELU(Layer):
    """Applies an Exponential Linear Unit function to an output.

    Formula:

    ```
    f(x) = alpha * (exp(x) - 1.) for x < 0
    f(x) = x for x >= 0
    ```

    Args:
        alpha: float, slope of negative section. Defaults to `1.0`.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.
    """
    def __init__(self, alpha=1.0, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha
        self.supports_masking = True
        self._build_at_init()
    def call(self, inputs):
        return activations.elu(inputs, alpha=self.alpha)
    def get_config(self):
        # Serialize `alpha` so a non-default slope survives a save/load
        # round trip (mirrors LeakyReLU/ReLU/Softmax in this package,
        # which all serialize their constructor parameters).
        config = super().get_config()
        config.update({"alpha": self.alpha})
        return config
    def compute_output_shape(self, input_shape):
        # Elementwise activation: output shape equals input shape.
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/prelu_test.py | keras/src/layers/activations/prelu_test.py | import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import prelu
class PReLUTest(testing.TestCase):
    """Unit tests for `keras.layers.PReLU`."""
    @pytest.mark.requires_trainable_backend
    def test_prelu(self):
        # Generic layer contract checks with one shared alpha axis.
        self.run_layer_test(
            prelu.PReLU,
            init_kwargs={
                "alpha_initializer": "zeros",
                "alpha_regularizer": "L1",
                "alpha_constraint": "MaxNorm",
                "shared_axes": 1,
            },
            input_shape=(2, 3, 4),
            supports_masking=True,
        )
    def test_prelu_correctness(self):
        # Compare the layer against a NumPy reference implementation.
        def np_prelu(x, alpha):
            return (x > 0) * x + (x <= 0) * alpha * x
        inputs = np.random.randn(2, 10, 5, 3)
        prelu_layer = prelu.PReLU(
            alpha_initializer="glorot_uniform",
            alpha_regularizer="l1",
            alpha_constraint="non_neg",
            shared_axes=(1, 2),
        )
        prelu_layer.build(inputs.shape)
        # alpha is shared over axes 1 and 2, so its shape is (1, 1, 3)
        # and broadcasts against the (10, 5) spatial dimensions.
        weights = np.random.random((1, 1, 3))
        prelu_layer.alpha.assign(weights)
        ref_out = np_prelu(inputs, weights)
        self.assertAllClose(prelu_layer(inputs), ref_out)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/softmax.py | keras/src/layers/activations/softmax.py | from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
    """Return a large negative constant representable in `dtype`."""
    # float16 cannot represent -1e9, so use a smaller magnitude there.
    return -3e4 if backend.standardize_dtype(dtype) == "float16" else -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
    """Softmax activation layer.

    Formula:
    ``` python
    exp_x = exp(x - max(x))
    f(x) = exp_x / sum(exp_x)
    ```

    Example:

    >>> softmax_layer = keras.layers.Softmax()
    >>> input = np.array([1.0, 2.0, 1.0])
    >>> result = softmax_layer(input)
    >>> result
    [0.21194157, 0.5761169, 0.21194157]

    Args:
        axis: Integer, or list of Integers, axis along which the softmax
            normalization is applied.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.

    Call arguments:
        inputs: The inputs (logits) to the softmax layer.
        mask: A boolean mask of the same shape as `inputs`. The mask
            specifies 1 to keep and 0 to mask. Defaults to `None`.

    Returns:
        Softmaxed output with the same shape as `inputs`.
    """
    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        # Declare that this layer can consume a mask (handled in `call`).
        self.supports_masking = True
        self._build_at_init()
    def call(self, inputs, mask=None):
        if mask is not None:
            # We keep the positions where the mask is True or > 0.5, and
            # set the other (masked) positions to a large negative number
            # (-1e9, or -3e4 for float16) so they get ~0 probability.
            if backend.standardize_dtype(mask.dtype) != "bool":
                mask = backend.numpy.greater(
                    mask, backend.cast(0.5, dtype=mask.dtype)
                )
            inputs = backend.numpy.where(
                mask, inputs, _large_negative_number(inputs.dtype)
            )
        if isinstance(self.axis, (tuple, list)):
            if len(self.axis) > 1:
                # Multi-axis softmax: computed as exp(x - logsumexp(x))
                # with the normalization taken jointly over all axes.
                outputs = backend.numpy.exp(
                    inputs
                    - backend.math.logsumexp(
                        inputs, axis=self.axis, keepdims=True
                    )
                )
            else:
                outputs = activations.softmax(inputs, axis=self.axis[0])
        else:
            outputs = activations.softmax(inputs, axis=self.axis)
        if mask is not None:
            # Apply the mask to the softmax output to ensure that masked
            # values are set to 0 in case the entire axis is masked.
            outputs = backend.numpy.multiply(
                outputs, backend.cast(mask, outputs.dtype)
            )
        return outputs
    def get_config(self):
        config = super().get_config()
        config.update({"axis": self.axis})
        return config
    def compute_output_shape(self, input_shape):
        # Elementwise normalization: shape is unchanged.
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/activation_test.py | keras/src/layers/activations/activation_test.py | import pytest
from keras.src import activations
from keras.src import layers
from keras.src import testing
class ActivationTest(testing.TestCase):
    """Smoke tests for the generic `Activation` wrapper layer."""

    @pytest.mark.requires_trainable_backend
    def test_activation_basics(self):
        # Exercise both a string identifier and a callable activation.
        cases = [
            ("relu", (2, 3)),
            (activations.gelu, (2, 2)),
        ]
        for activation, shape in cases:
            self.run_layer_test(
                layers.Activation,
                init_kwargs={"activation": activation},
                input_shape=shape,
                expected_output_shape=shape,
                expected_num_trainable_weights=0,
                expected_num_non_trainable_weights=0,
                expected_num_seed_generators=0,
                expected_num_losses=0,
                supports_masking=True,
                assert_built_after_instantiation=True,
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/elu_test.py | keras/src/layers/activations/elu_test.py | import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import elu
class ELUTest(testing.TestCase):
    """Unit tests for the `ELU` activation layer."""

    def test_config(self):
        self.run_class_serialization_test(elu.ELU())

    @pytest.mark.requires_trainable_backend
    def test_elu(self):
        self.run_layer_test(
            elu.ELU,
            init_kwargs={},
            input_shape=(2, 3, 4),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_correctness(self):
        # NumPy reference: identity for x > 0, alpha * (exp(x) - 1)
        # otherwise.
        def np_elu(x, alpha=1.0):
            return np.where(x > 0, x, alpha * (np.exp(x) - 1))

        x = np.random.random((2, 2, 5))
        for alpha in (1.0, 0.7):
            layer = elu.ELU(alpha=alpha)
            self.assertAllClose(layer(x), np_elu(x, alpha=alpha))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/softmax_test.py | keras/src/layers/activations/softmax_test.py | import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
    """Unit tests for `keras.layers.Softmax`."""
    @pytest.mark.requires_trainable_backend
    def test_softmax(self):
        # Generic layer contract checks (serialization, masking, shapes).
        self.run_layer_test(
            softmax.Softmax,
            init_kwargs={},
            input_shape=(2, 3, 4),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
    def test_softmax_correctness(self):
        # Default axis (-1): each row normalizes to probabilities.
        softmax_layer = softmax.Softmax()
        input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
        expected_output = np.array(
            [
                [0.21194157, 0.5761169, 0.21194157],
                [0.21194157, 0.5761169, 0.21194157],
            ]
        )
        result = softmax_layer(input)
        self.assertAllClose(result, expected_output)
    def test_softmax_correctness_with_mask(self):
        # Multi-axis softmax with a mask: masked positions must be 0 and
        # the remaining values normalized jointly over axes (1, 0).
        softmax_layer = softmax.Softmax(axis=(1, 0))
        input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
        mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
        expected_output = np.array(
            [[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
        )
        result = softmax_layer(input, mask=mask)
        self.assertAllClose(result, expected_output)
    def test_softmax_correctness_with_axis(self):
        # A single-element axis tuple behaves like the plain integer axis.
        softmax_layer = softmax.Softmax(axis=(1))
        input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
        expected_output = np.array(
            [
                [0.21194157, 0.5761169, 0.21194157],
                [0.21194157, 0.5761169, 0.21194157],
            ]
        )
        result = softmax_layer(input)
        self.assertAllClose(result, expected_output)
    def test_softmax_masked_values_are_zero_including_fully_masked(self):
        """
        Tests softmax with mask on default axis (-1).
        Ensures output is 0 where mask is False.
        Includes a row where all elements are masked.
        """
        softmax_layer = softmax.Softmax()  # Default axis = -1
        input = np.array(
            [
                [1.0, 2.0, 5.0, 1.0],
                [1.0, 1.0, 1.0, 1.0],
                [3.0, 1.0, 2.0, 4.0],
            ],
            dtype=np.float32,
        )
        mask = np.array(
            [
                [True, True, False, False],  # Partially masked
                [False, False, False, False],  # Fully masked
                [True, True, True, True],  # Not masked
            ],
            dtype=bool,
        )
        expected_output = np.array(
            [
                [0.268941, 0.731059, 0.0, 0.0],  # last two masked
                [0.0, 0.0, 0.0, 0.0],  # Fully masked row should be all zeros
                [0.236883, 0.032059, 0.087144, 0.643914],
            ]
        )
        result = softmax_layer(input, mask=mask)
        self.assertAllClose(result, expected_output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/__init__.py | keras/src/layers/activations/__init__.py | from keras.src.layers.activations.elu import ELU
from keras.src.layers.activations.leaky_relu import LeakyReLU
from keras.src.layers.activations.prelu import PReLU
from keras.src.layers.activations.relu import ReLU
from keras.src.layers.activations.softmax import Softmax
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/leaky_relu.py | keras/src/layers/activations/leaky_relu.py | import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
    """Leaky version of a Rectified Linear Unit activation layer.

    This layer allows a small gradient when the unit is not active.

    Formula:
    ``` python
    f(x) = alpha * x if x < 0
    f(x) = x if x >= 0
    ```

    Example:
    ``` python
    leaky_relu_layer = LeakyReLU(negative_slope=0.5)
    input = np.array([-10, -5, 0.0, 5, 10])
    result = leaky_relu_layer(input)
    # result = [-5. , -2.5, 0. , 5. , 10.]
    ```

    Args:
        negative_slope: Float >= 0.0. Negative slope coefficient.
            Defaults to `0.3`.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
    """
    def __init__(self, negative_slope=0.3, **kwargs):
        # Legacy name: `alpha` is popped before calling the base
        # constructor so it is not forwarded as an unknown base kwarg.
        if "alpha" in kwargs:
            negative_slope = kwargs.pop("alpha")
            warnings.warn(
                "Argument `alpha` is deprecated. Use `negative_slope` instead."
            )
        super().__init__(**kwargs)
        if negative_slope is None or negative_slope < 0:
            raise ValueError(
                "The negative_slope value of a Leaky ReLU layer "
                "cannot be None or negative value. Expected a float."
                f" Received: negative_slope={negative_slope}"
            )
        self.negative_slope = negative_slope
        self.supports_masking = True
        # The layer creates no weights, so it can be built at
        # construction time.
        self._build_at_init()
    def call(self, inputs):
        return activations.leaky_relu(
            inputs, negative_slope=self.negative_slope
        )
    def get_config(self):
        config = super().get_config()
        config.update({"negative_slope": self.negative_slope})
        return config
    def compute_output_shape(self, input_shape):
        # Elementwise activation: shape is unchanged.
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/prelu.py | keras/src/layers/activations/prelu.py | from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.PReLU")
class PReLU(Layer):
    """Parametric Rectified Linear Unit activation layer.

    Formula:
    ``` python
    f(x) = alpha * x for x < 0
    f(x) = x for x >= 0
    ```
    where `alpha` is a learned array with the same shape as x.

    Args:
        alpha_initializer: Initializer function for the weights.
        alpha_regularizer: Regularizer for the weights.
        alpha_constraint: Constraint for the weights.
        shared_axes: The axes along which to share learnable parameters for
            the activation function. For example, if the incoming feature
            maps are from a 2D convolution with output shape
            `(batch, height, width, channels)`, and you wish to share
            parameters across space so that each filter only has one set of
            parameters, set `shared_axes=[1, 2]`.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.
    """
    def __init__(
        self,
        alpha_initializer="Zeros",
        alpha_regularizer=None,
        alpha_constraint=None,
        shared_axes=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.alpha_initializer = initializers.get(alpha_initializer)
        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.alpha_constraint = constraints.get(alpha_constraint)
        # Normalize `shared_axes` to None or a list of ints.
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
    def build(self, input_shape):
        # One alpha per input feature, excluding the batch dimension.
        param_shape = list(input_shape[1:])
        if self.shared_axes is not None:
            # Shared axes collapse to size 1 so alpha broadcasts across
            # them. `shared_axes` is 1-based (axis 0 is the batch), hence
            # the `i - 1` after dropping the batch dimension above.
            for i in self.shared_axes:
                param_shape[i - 1] = 1
        self.alpha = self.add_weight(
            shape=param_shape,
            name="alpha",
            initializer=self.alpha_initializer,
            regularizer=self.alpha_regularizer,
            constraint=self.alpha_constraint,
        )
        # Set input spec: pin the size of every non-shared axis so later
        # calls must match the shape alpha was built against.
        axes = {}
        if self.shared_axes:
            for i in range(1, len(input_shape)):
                if i not in self.shared_axes:
                    axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    def call(self, inputs):
        # relu(x) keeps the positive part; -alpha * relu(-x) reconstructs
        # the negative part scaled elementwise by alpha.
        pos = activations.relu(inputs)
        neg = -self.alpha * activations.relu(-inputs)
        return pos + neg
    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "alpha_initializer": initializers.serialize(
                    self.alpha_initializer
                ),
                "alpha_regularizer": regularizers.serialize(
                    self.alpha_regularizer
                ),
                "alpha_constraint": constraints.serialize(
                    self.alpha_constraint
                ),
                "shared_axes": self.shared_axes,
            }
        )
        return config
    def compute_output_shape(self, input_shape):
        # Elementwise activation: shape is unchanged.
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/activations/leaky_relu_test.py | keras/src/layers/activations/leaky_relu_test.py | import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
    """Unit tests for the `LeakyReLU` layer."""

    @pytest.mark.requires_trainable_backend
    def test_leaky_relu(self):
        self.run_layer_test(
            leaky_relu.LeakyReLU,
            init_kwargs={"negative_slope": 1},
            input_shape=(2, 3, 4),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_leaky_relu_correctness(self):
        # Negative inputs are scaled by the slope; non-negatives pass
        # through unchanged.
        layer = leaky_relu.LeakyReLU(negative_slope=0.5)
        x = np.array([-10, -5, 0.0, 5, 10])
        expected = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
        self.assertAllClose(layer(x), expected)

    def test_invalid_usage(self):
        with self.assertRaisesRegex(
            ValueError,
            "The negative_slope value of a Leaky ReLU layer cannot be None",
        ):
            self.run_layer_test(
                leaky_relu.LeakyReLU,
                init_kwargs={"negative_slope": None},
                input_shape=(2, 3, 4),
                supports_masking=True,
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/average_pooling2d.py | keras/src/layers/pooling/average_pooling2d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
    """Average pooling operation for 2D spatial data.

    Downsamples the input along its spatial dimensions (height and width)
    by taking the average value over an input window
    (of size defined by `pool_size`) for each channel of the input.
    The window is shifted by `strides` along each dimension.

    The resulting output when using the `"valid"` padding option has a
    spatial shape (number of rows or columns) of:
    `output_shape = math.floor((input_shape - pool_size) / strides) + 1`
    (when `input_shape >= pool_size`)

    The resulting output shape when using the `"same"` padding option is:
    `output_shape = input_shape`

    Args:
        pool_size: int or tuple of 2 integers, factors by which to downscale
            (dim1, dim2). If only one integer is specified, the same
            window length will be used for all dimensions.
        strides: int or tuple of 2 integers, or None. Strides values. If
            None, it will default to `pool_size`. If only one int is
            specified, the same stride size will be used for all dimensions.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly
            to the left/right or up/down of the input such that output has
            the same height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width,
            channels)` while `"channels_first"` corresponds to inputs with
            shape `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.

    Input shape:
        - If `data_format="channels_last"`:
            4D tensor with shape `(batch_size, height, width, channels)`.
        - If `data_format="channels_first"`:
            4D tensor with shape `(batch_size, channels, height, width)`.

    Output shape:
        - If `data_format="channels_last"`:
            4D tensor with shape
            `(batch_size, pooled_height, pooled_width, channels)`.
        - If `data_format="channels_first"`:
            4D tensor with shape
            `(batch_size, channels, pooled_height, pooled_width)`.

    Examples:

    `strides=(1, 1)` and `padding="valid"`:

    >>> x = np.array([[1., 2., 3.],
    ...               [4., 5., 6.],
    ...               [7., 8., 9.]])
    >>> x = np.reshape(x, [1, 3, 3, 1])
    >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
    ...    strides=(1, 1), padding="valid")
    >>> avg_pool_2d(x)

    `strides=(2, 2)` and `padding="valid"`:

    >>> x = np.array([[1., 2., 3., 4.],
    ...               [5., 6., 7., 8.],
    ...               [9., 10., 11., 12.]])
    >>> x = np.reshape(x, [1, 3, 4, 1])
    >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
    ...    strides=(2, 2), padding="valid")
    >>> avg_pool_2d(x)

    `stride=(1, 1)` and `padding="same"`:

    >>> x = np.array([[1., 2., 3.],
    ...               [4., 5., 6.],
    ...               [7., 8., 9.]])
    >>> x = np.reshape(x, [1, 3, 3, 1])
    >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
    ...    strides=(1, 1), padding="same")
    >>> avg_pool_2d(x)
    """
    def __init__(
        self,
        pool_size,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # All pooling behavior lives in BasePooling; this subclass only
        # fixes the dimensionality (2D) and the pooling mode (average).
        super().__init__(
            pool_size,
            strides,
            pool_dimensions=2,
            pool_mode="average",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_max_pooling3d.py | keras/src/layers/pooling/adaptive_max_pooling3d.py | """Adaptive Max Pooling 3D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling3D")
class AdaptiveMaxPooling3D(BaseAdaptiveMaxPooling):
    """Adaptive max pooling for 3D (volumetric) inputs.

    Unlike fixed-window pooling, this layer sizes its pooling windows so
    that the spatial output always equals `output_size`, whatever the
    spatial size of the input.

    Args:
        output_size: Integer or tuple of 3 integers
            `(depth, height, width)` giving the target spatial output
            size. A single integer is used for all three dimensions.
        data_format: string, either `"channels_last"` or
            `"channels_first"`. `"channels_last"` corresponds to inputs
            with shape `(batch, depth, height, width, channels)`;
            `"channels_first"` to
            `(batch, channels, depth, height, width)`. Defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`; if never set, `"channels_last"`.

    Input shape:
        5D tensor, layout depending on `data_format` (see above).

    Output shape:
        5D tensor with the three spatial dimensions replaced by
        `output_size`.

    Example:

    >>> import numpy as np
    >>> input_vol = np.random.rand(1, 32, 32, 32, 3)
    >>> layer = AdaptiveMaxPooling3D(output_size=16)
    >>> output_vol = layer(input_vol)
    >>> output_vol.shape
    (1, 16, 16, 16, 3)
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        # Normalize `output_size` to a (depth, height, width) triple.
        if isinstance(output_size, int):
            size_tuple = (output_size,) * 3
        elif isinstance(output_size, (tuple, list)) and len(output_size) == 3:
            size_tuple = tuple(output_size)
        else:
            raise TypeError(
                f"`output_size` must be an integer or "
                f"(depth, height, width) tuple. "
                f"Received: {output_size} of type {type(output_size)}"
            )
        super().__init__(size_tuple, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/base_pooling.py | keras/src/layers/pooling/base_pooling.py | from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
    """Base pooling layer.

    Shared implementation behind the Max/Average pooling layers: it
    normalizes `pool_size`/`strides` to tuples, validates the input rank,
    and dispatches to `ops.max_pool` or `ops.average_pool` depending on
    `pool_mode`.
    """
    def __init__(
        self,
        pool_size,
        strides,
        pool_dimensions,
        pool_mode="max",
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        super().__init__(name=name, **kwargs)
        # Normalize scalar or sequence arguments into tuples of length
        # `pool_dimensions`.
        self.pool_size = argument_validation.standardize_tuple(
            pool_size, pool_dimensions, "pool_size"
        )
        # Strides default to the pool size when not provided.
        strides = pool_size if strides is None else strides
        self.strides = argument_validation.standardize_tuple(
            strides, pool_dimensions, "strides", allow_zero=True
        )
        self.pool_mode = pool_mode
        self.padding = padding
        self.data_format = backend.standardize_data_format(data_format)
        # Inputs must be rank `pool_dimensions + 2` (batch + channels).
        self.input_spec = InputSpec(ndim=pool_dimensions + 2)
        self._build_at_init()
    def call(self, inputs):
        # Dispatch on the mode fixed by the concrete subclass.
        if self.pool_mode == "max":
            return ops.max_pool(
                inputs,
                pool_size=self.pool_size,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
            )
        elif self.pool_mode == "average":
            return ops.average_pool(
                inputs,
                pool_size=self.pool_size,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
            )
        else:
            raise ValueError(
                "`pool_mode` must be either 'max' or 'average'. Received: "
                f"{self.pool_mode}."
            )
    def compute_output_shape(self, input_shape):
        return compute_pooling_output_shape(
            input_shape,
            self.pool_size,
            self.strides,
            self.padding,
            self.data_format,
        )
    def get_config(self):
        # Note: `pool_mode` is not serialized; each concrete subclass
        # fixes it in its own `__init__`.
        config = super().get_config()
        config.update(
            {
                "pool_size": self.pool_size,
                "padding": self.padding,
                "strides": self.strides,
                "data_format": self.data_format,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_pooling3d_test.py | keras/src/layers/pooling/adaptive_pooling3d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
# Backends for which this module's tests are skipped entirely.
SKIP_BACKENDS = ["openvino"]
# Module-level mark: every test below is skipped when running on one of
# the backends listed above.
pytestmark = pytest.mark.skipif(
    backend.backend() in SKIP_BACKENDS,
    reason=(
        "Adaptive pooling tests not supported for backend: {}".format(
            backend.backend()
        )
    ),
)
class AdaptivePooling3DLayerTest(testing.TestCase):
    """Tests for AdaptiveAveragePooling3D and AdaptiveMaxPooling3D."""
    def _run_layer_test(self, layer_class, x_np, output_size, data_format):
        """Helper: test layer output shape matches compute_output_shape()."""
        layer = layer_class(output_size=output_size, data_format=data_format)
        y = layer(x_np)
        expected_shape = layer.compute_output_shape(x_np.shape)
        self.assertEqual(y.shape, expected_shape)
    def test_average_pooling_basic_shapes(self):
        """Test AdaptiveAveragePooling3D basic shape transformation."""
        shape = (2, 3, 8, 8, 8)  # N,C,D,H,W
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling3D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_max_pooling_basic_shapes(self):
        """Test AdaptiveMaxPooling3D basic shape transformation."""
        shape = (2, 3, 8, 8, 8)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling3D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_average_pooling_channels_last(self):
        """Test AdaptiveAveragePooling3D with channels_last format."""
        shape = (2, 8, 8, 8, 3)  # N,D,H,W,C
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling3D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_max_pooling_channels_last(self):
        """Test AdaptiveMaxPooling3D with channels_last format."""
        shape = (2, 8, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling3D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_average_pooling_tuple_output_size(self):
        """Test AdaptiveAveragePooling3D with tuple output_size."""
        shape = (2, 8, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling3D,
            x,
            output_size=(4, 4, 4),
            data_format="channels_last",
        )
    def test_max_pooling_tuple_output_size(self):
        """Test AdaptiveMaxPooling3D with tuple output_size."""
        shape = (2, 8, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        # Non-uniform target: depth differs from height/width.
        self._run_layer_test(
            layers.AdaptiveMaxPooling3D,
            x,
            output_size=(2, 4, 4),
            data_format="channels_last",
        )
    def test_average_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveAveragePooling3D."""
        layer = layers.AdaptiveAveragePooling3D(
            output_size=8, data_format="channels_last"
        )
        # Unknown batch dimension must be preserved as None.
        input_shape = (None, 32, 32, 32, 3)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (None, 8, 8, 8, 3))
    def test_max_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveMaxPooling3D."""
        layer = layers.AdaptiveMaxPooling3D(
            output_size=(4, 8, 8), data_format="channels_first"
        )
        input_shape = (2, 3, 32, 32, 32)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (2, 3, 4, 8, 8))
    def test_average_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveAveragePooling3D."""
        layer = layers.AdaptiveAveragePooling3D(
            output_size=16, data_format="channels_first"
        )
        config = layer.get_config()
        # A scalar output_size is normalized to a 3-tuple in the config.
        self.assertEqual(config["output_size"], (16, 16, 16))
        self.assertEqual(config["data_format"], "channels_first")
    def test_max_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveMaxPooling3D."""
        layer = layers.AdaptiveMaxPooling3D(
            output_size=(8, 16, 16), data_format="channels_last"
        )
        config = layer.get_config()
        self.assertEqual(config["output_size"], (8, 16, 16))
        self.assertEqual(config["data_format"], "channels_last")
    def test_average_pooling3d_numerical(self):
        """Test AdaptiveAveragePooling3D numerical correctness."""
        # output_size equals the input's spatial size (2), so adaptive
        # pooling must act as the identity here.
        inputs = np.array(
            [[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
            dtype="float32",
        )
        expected = np.array(
            [[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
            dtype="float32",
        )
        layer = layers.AdaptiveAveragePooling3D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
    def test_max_pooling3d_numerical(self):
        """Test AdaptiveMaxPooling3D numerical correctness."""
        # Same identity-pooling setup as the average-pooling test above.
        inputs = np.array(
            [[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
            dtype="float32",
        )
        expected = np.array(
            [[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
            dtype="float32",
        )
        layer = layers.AdaptiveMaxPooling3D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/average_pooling_test.py | keras/src/layers/pooling/average_pooling_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras.src import backend
from keras.src import layers
from keras.src import testing
def _same_padding(input_size, pool_size, stride):
if input_size % stride == 0:
return max(pool_size - stride, 0)
else:
return max(pool_size - (input_size % stride), 0)
def np_avgpool1d(x, pool_size, strides, padding, data_format):
    """NumPy reference implementation of 1D average pooling.

    Accepts either data format, pools over the temporal axis, and
    returns the result in the same data format as the input.
    """
    if data_format == "channels_first":
        x = x.swapaxes(1, 2)  # operate internally in channels_last
    # Scalars and 1-element sequences are both accepted.
    if isinstance(pool_size, (tuple, list)):
        pool_size = pool_size[0]
    step = strides[0] if isinstance(strides, (tuple, list)) else strides
    if padding == "same":
        _, length, _ = x.shape
        pad_amount = _same_padding(length, pool_size, step)
        pad_spec = [(0, 0)] * x.ndim
        pad_spec[1] = (0, pad_amount)
        x = np.pad(x, pad_width=pad_spec, mode="edge")
    batch, length, channels = x.shape
    # assumes length >= pool_size — TODO confirm with callers
    out_length = (length - pool_size) // step + 1
    # Build a zero-copy sliding-window view, then average each window.
    window_shape = (batch, out_length, channels, pool_size)
    window_strides = (
        x.strides[0],
        step * x.strides[1],
        x.strides[2],
        x.strides[1],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.mean(axis=3)
    if data_format == "channels_first":
        out = out.swapaxes(1, 2)
    return out
def np_avgpool2d(x, pool_size, strides, padding, data_format):
    """NumPy reference for 2D average pooling ("same" uses edge padding)."""
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 1))
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    pool_h, pool_w = pool_size
    step_h, step_w = strides
    if padding == "same":
        _, height, width, _ = x.shape
        pad_spec = [(0, 0)] * x.ndim
        pad_spec[1] = (0, _same_padding(height, pool_h, step_h))
        pad_spec[2] = (0, _same_padding(width, pool_w, step_w))
        x = np.pad(x, pad_width=pad_spec, mode="edge")
    n_batch, height, width, channels = x.shape
    out_h = (height - pool_h) // step_h + 1
    out_w = (width - pool_w) // step_w + 1
    # Zero-copy sliding-window view over both spatial axes.
    window_shape = (n_batch, out_h, out_w, channels, pool_h, pool_w)
    window_strides = (
        x.strides[0],
        step_h * x.strides[1],
        step_w * x.strides[2],
        x.strides[3],
        x.strides[1],
        x.strides[2],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.mean(axis=(4, 5))
    if data_format == "channels_first":
        out = out.transpose((0, 3, 1, 2))
    return out
def np_avgpool3d(x, pool_size, strides, padding, data_format):
    """NumPy reference for 3D average pooling ("same" pads symmetrically)."""
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 4, 1))
    if isinstance(pool_size, int):
        pool_size = (pool_size,) * 3
    if isinstance(strides, int):
        strides = (strides,) * 3
    pool_h, pool_w, pool_d = pool_size
    step_h, step_w, step_d = strides
    if padding == "same":
        _, height, width, depth, _ = x.shape
        pad_spec = [(0, 0)] * x.ndim
        pad_spec[1] = (0, _same_padding(height, pool_h, step_h))
        pad_spec[2] = (0, _same_padding(width, pool_w, step_w))
        pad_spec[3] = (0, _same_padding(depth, pool_d, step_d))
        # NOTE: the 3D reference pads with mode="symmetric" while the
        # 1D/2D references use mode="edge"; preserved as-is.
        x = np.pad(x, pad_width=pad_spec, mode="symmetric")
    n_batch, height, width, depth, channels = x.shape
    out_h = (height - pool_h) // step_h + 1
    out_w = (width - pool_w) // step_w + 1
    out_d = (depth - pool_d) // step_d + 1
    # Zero-copy sliding-window view over all three spatial axes.
    window_shape = (n_batch, out_h, out_w, out_d, channels, pool_h, pool_w, pool_d)
    window_strides = (
        x.strides[0],
        step_h * x.strides[1],
        step_w * x.strides[2],
        step_d * x.strides[3],
        x.strides[4],
        x.strides[1],
        x.strides[2],
        x.strides[3],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.mean(axis=(5, 6, 7))
    if data_format == "channels_first":
        out = out.transpose((0, 4, 1, 2, 3))
    return out
@pytest.mark.requires_trainable_backend
class AveragePoolingBasicTest(testing.TestCase):
    """Shape and serialization checks for AveragePooling1D/2D/3D.

    Each parameterized case delegates to `run_layer_test`, which builds
    the layer, verifies the output shape, weight/loss counts, masking
    support, and the config round-trip.
    """
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format, input_shape,
        #  expected_output_shape)
        (2, 1, "valid", "channels_last", (3, 5, 4), (3, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 4), (3, 5, 4)),
        ((2,), (2,), "valid", "channels_last", (3, 5, 4), (3, 2, 4)),
    )
    def test_average_pooling1d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling1D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
        )
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format, input_shape,
        #  expected_output_shape)
        (2, 1, "valid", "channels_last", (3, 5, 5, 4), (3, 4, 4, 4)),
        (2, 1, "same", "channels_last", (3, 5, 5, 4), (3, 5, 5, 4)),
        (2, 1, "valid", "channels_first", (3, 5, 5, 4), (3, 5, 4, 3)),
        (2, 1, "same", "channels_first", (3, 5, 5, 4), (3, 5, 5, 4)),
        ((2, 3), (2, 2), "valid", "channels_last", (3, 5, 5, 4), (3, 2, 2, 4)),
        ((2, 3), (2, 2), "same", "channels_last", (3, 5, 5, 4), (3, 3, 3, 4)),
        ((2, 3), (3, 3), "same", "channels_first", (3, 5, 5, 4), (3, 5, 2, 2)),
    )
    def test_average_pooling2d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling2D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
        )
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format, input_shape,
        #  expected_output_shape)
        (2, 1, "valid", "channels_last", (3, 5, 5, 5, 4), (3, 4, 4, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 5, 5, 4), (3, 5, 5, 5, 4)),
        (
            (2, 3, 2),
            (2, 2, 1),
            "valid",
            "channels_last",
            (3, 5, 5, 5, 4),
            (3, 2, 2, 4, 4),
        ),
    )
    def test_average_pooling3d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling3D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            # Incomplete op support on tensorflow.
            run_mixed_precision_check=False,
            assert_built_after_instantiation=True,
        )
class AveragePoolingCorrectnessTest(testing.TestCase):
    """Numerical checks of AveragePooling1D/2D/3D against the NumPy
    reference implementations (`np_avgpool1d/2d/3d`) defined above.

    "same"-padding cases are split into separate tests so they can be
    skipped on the torch backend, whose "same" padding differs from the
    NumPy reference.
    """
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        ((2,), (2,), "valid", "channels_last"),
        ((2,), (2,), "valid", "channels_first"),
    )
    def test_average_pooling1d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
        layer = layers.AveragePooling1D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool1d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, 1, "same", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2,), (2,), "same", "channels_last"),
        ((2,), (2,), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling1d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
        layer = layers.AveragePooling1D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool1d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, 1, "valid", "channels_last"),
        ((2, 3), (2, 2), "valid", "channels_last"),
    )
    def test_average_pooling2d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
        layer = layers.AveragePooling2D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool2d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, (2, 1), "same", "channels_last"),
        (2, (2, 1), "same", "channels_first"),
        ((2, 2), (2, 2), "same", "channels_last"),
        ((2, 2), (2, 2), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling2d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
        layer = layers.AveragePooling2D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool2d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_last"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_first"),
    )
    def test_average_pooling3d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
        layer = layers.AveragePooling3D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool3d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (pool_size, strides, padding, data_format)
        (2, 1, "same", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2, 2, 2), (2, 2, 1), "same", "channels_last"),
        ((2, 2, 2), (2, 2, 1), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling3d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
        layer = layers.AveragePooling3D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = np_avgpool3d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_max_pooling_test.py | keras/src/layers/pooling/global_max_pooling_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
@pytest.mark.requires_trainable_backend
class GlobalMaxPoolingBasicTest(testing.TestCase):
    """Shape and serialization checks for GlobalMaxPooling1D/2D/3D.

    Each parameterized case delegates to `run_layer_test`, which builds
    the layer, verifies the output shape, weight/loss counts, masking
    support, and the config round-trip.
    """
    @parameterized.parameters(
        # (data_format, keepdims, input_shape, expected_output_shape)
        ("channels_last", False, (3, 5, 4), (3, 4)),
        ("channels_last", True, (3, 5, 4), (3, 1, 4)),
        ("channels_first", False, (3, 5, 4), (3, 5)),
    )
    def test_global_max_pooling1d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.GlobalMaxPooling1D,
            init_kwargs={
                "data_format": data_format,
                "keepdims": keepdims,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
        )
    @parameterized.parameters(
        # (data_format, keepdims, input_shape, expected_output_shape)
        ("channels_last", False, (3, 5, 6, 4), (3, 4)),
        ("channels_last", True, (3, 5, 6, 4), (3, 1, 1, 4)),
        ("channels_first", False, (3, 5, 6, 4), (3, 5)),
    )
    def test_global_max_pooling2d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.GlobalMaxPooling2D,
            init_kwargs={
                "data_format": data_format,
                "keepdims": keepdims,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
        )
    @parameterized.parameters(
        # (data_format, keepdims, input_shape, expected_output_shape)
        ("channels_last", False, (3, 5, 6, 5, 4), (3, 4)),
        ("channels_last", True, (3, 5, 6, 5, 4), (3, 1, 1, 1, 4)),
        ("channels_first", False, (3, 5, 6, 5, 4), (3, 5)),
    )
    def test_global_max_pooling3d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.GlobalMaxPooling3D,
            init_kwargs={
                "data_format": data_format,
                "keepdims": keepdims,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
        )
class GlobalMaxPoolingCorrectnessTest(testing.TestCase):
    """Numerical checks of GlobalMaxPooling1D/2D/3D against an inline
    NumPy reference (max-reduce over the spatial axes)."""
    @parameterized.parameters(
        # (data_format, keepdims)
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_max_pooling1d(self, data_format, keepdims):
        def np_global_max_pool1d(x, data_format, keepdims):
            # Spatial (steps) axis index depends on the layout.
            steps_axis = [1] if data_format == "channels_last" else [2]
            res = np.apply_over_axes(np.max, x, steps_axis)
            if not keepdims:
                res = res.squeeze()
            return res
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
        layer = layers.GlobalMaxPooling1D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_global_max_pool1d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (data_format, keepdims)
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_max_pooling2d(self, data_format, keepdims):
        def np_global_max_pool2d(x, data_format, keepdims):
            # Spatial (H, W) axis indices depend on the layout.
            steps_axis = [1, 2] if data_format == "channels_last" else [2, 3]
            res = np.apply_over_axes(np.max, x, steps_axis)
            if not keepdims:
                res = res.squeeze()
            return res
        inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4))
        layer = layers.GlobalMaxPooling2D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_global_max_pool2d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)
    @parameterized.parameters(
        # (data_format, keepdims)
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_max_pooling3d(self, data_format, keepdims):
        def np_global_max_pool3d(x, data_format, keepdims):
            # Spatial (D, H, W) axis indices depend on the layout.
            steps_axis = (
                [1, 2, 3] if data_format == "channels_last" else [2, 3, 4]
            )
            res = np.apply_over_axes(np.max, x, steps_axis)
            if not keepdims:
                res = res.squeeze()
            return res
        inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4))
        layer = layers.GlobalMaxPooling3D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_global_max_pool3d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/max_pooling1d.py | keras/src/layers/pooling/max_pooling1d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
    """Max pooling operation for 1D temporal data.

    Downsamples the input by taking the maximum over a window of
    `pool_size` steps, sliding the window by `strides`. With `"valid"`
    padding the output has
    `output_shape = (input_shape - pool_size + 1) / strides` steps;
    with `"same"` padding it has `output_shape = input_shape / strides`
    steps.

    Args:
        pool_size: int, size of the max pooling window.
        strides: int or None. Step of the pooling window between pooling
            steps. Defaults to `pool_size` when `None`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` applies no padding; `"same"` pads evenly so the
            output keeps the same steps dimension as the input.
        data_format: string, either `"channels_last"` (inputs of shape
            `(batch, steps, features)`) or `"channels_first"` (inputs of
            shape `(batch, features, steps)`). Defaults to the
            `image_data_format` value in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless
            changed.

    Input shape:
        3D tensor: `(batch_size, steps, features)` for
        `"channels_last"`, or `(batch_size, features, steps)` for
        `"channels_first"`.

    Output shape:
        3D tensor in the same layout with the steps dimension
        downsampled.

    Examples:

    >>> x = np.reshape(np.array([1., 2., 3., 4., 5.]), [1, 5, 1])
    >>> keras.layers.MaxPooling1D(pool_size=2, strides=1,
    ...                           padding="valid")(x)
    >>> keras.layers.MaxPooling1D(pool_size=2, strides=2,
    ...                           padding="valid")(x)
    >>> keras.layers.MaxPooling1D(pool_size=2, strides=1,
    ...                           padding="same")(x)
    """

    def __init__(
        self,
        pool_size=2,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # All pooling mechanics live in BasePooling; this subclass only
        # fixes the dimensionality (1) and the pooling mode ("max").
        super().__init__(
            pool_size=pool_size,
            strides=strides,
            pool_dimensions=1,
            pool_mode="max",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_pooling1d_test.py | keras/src/layers/pooling/adaptive_pooling1d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
# Backends that do not support adaptive pooling; the module-level
# `pytestmark` below skips every test in this file on them.
SKIP_BACKENDS = ["openvino"]
pytestmark = pytest.mark.skipif(
    backend.backend() in SKIP_BACKENDS,
    reason=(
        "Adaptive pooling tests not supported for backend: {}".format(
            backend.backend()
        )
    ),
)
class AdaptivePooling1DLayerTest(testing.TestCase):
    """Tests for AdaptiveAveragePooling1D and AdaptiveMaxPooling1D."""
    def _run_layer_test(self, layer_class, x_np, output_size, data_format):
        """Helper: test layer output shape matches compute_output_shape()."""
        layer = layer_class(output_size=output_size, data_format=data_format)
        y = layer(x_np)
        expected_shape = layer.compute_output_shape(x_np.shape)
        self.assertEqual(y.shape, expected_shape)
    def test_average_pooling_basic_shapes(self):
        """Test AdaptiveAveragePooling1D basic shape transformation."""
        shape = (2, 3, 8)  # N,C,L
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling1D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_max_pooling_basic_shapes(self):
        """Test AdaptiveMaxPooling1D basic shape transformation."""
        shape = (2, 3, 8)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling1D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_average_pooling_channels_last(self):
        """Test AdaptiveAveragePooling1D with channels_last format."""
        shape = (2, 8, 3)  # N,L,C
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling1D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_max_pooling_channels_last(self):
        """Test AdaptiveMaxPooling1D with channels_last format."""
        shape = (2, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling1D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_average_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveAveragePooling1D."""
        layer = layers.AdaptiveAveragePooling1D(
            output_size=16, data_format="channels_last"
        )
        # Unknown (None) batch dimension must pass through unchanged.
        input_shape = (None, 64, 3)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (None, 16, 3))
    def test_max_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveMaxPooling1D."""
        layer = layers.AdaptiveMaxPooling1D(
            output_size=16, data_format="channels_first"
        )
        input_shape = (2, 3, 64)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (2, 3, 16))
    def test_average_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveAveragePooling1D."""
        layer = layers.AdaptiveAveragePooling1D(
            output_size=32, data_format="channels_first"
        )
        config = layer.get_config()
        # An int output_size is normalized to a 1-tuple in the config.
        self.assertEqual(config["output_size"], (32,))
        self.assertEqual(config["data_format"], "channels_first")
    def test_max_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveMaxPooling1D."""
        layer = layers.AdaptiveMaxPooling1D(
            output_size=32, data_format="channels_last"
        )
        config = layer.get_config()
        self.assertEqual(config["output_size"], (32,))
        self.assertEqual(config["data_format"], "channels_last")
    def test_average_pooling_numerical(self):
        """Test AdaptiveAveragePooling1D numerical correctness."""
        # [1..6] split into halves {1,2,3} and {4,5,6}: means 2 and 5.
        inputs = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]], dtype="float32")
        expected = np.array([[[2.0, 5.0]]], dtype="float32")
        layer = layers.AdaptiveAveragePooling1D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
    def test_max_pooling_numerical(self):
        """Test AdaptiveMaxPooling1D numerical correctness."""
        # [1..6] split into halves {1,2,3} and {4,5,6}: maxima 3 and 6.
        inputs = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]], dtype="float32")
        expected = np.array([[[3.0, 6.0]]], dtype="float32")
        layer = layers.AdaptiveMaxPooling1D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/average_pooling1d.py | keras/src/layers/pooling/average_pooling1d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
class AveragePooling1D(BasePooling):
    """Average pooling for temporal data.

    Downsamples the input by averaging over a window of `pool_size`
    steps, sliding the window by `strides`. With `"valid"` padding the
    output has `output_shape = (input_shape - pool_size + 1) / strides`
    steps; with `"same"` padding it has
    `output_shape = input_shape / strides` steps.

    Args:
        pool_size: int, size of the max pooling window.
        strides: int or None. Step of the pooling window between pooling
            steps. Defaults to `pool_size` when `None`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` applies no padding; `"same"` pads evenly so the
            output keeps the same steps dimension as the input.
        data_format: string, either `"channels_last"` (inputs of shape
            `(batch, steps, features)`) or `"channels_first"` (inputs of
            shape `(batch, features, steps)`). Defaults to the
            `image_data_format` value in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless
            changed.

    Input shape:
        3D tensor: `(batch_size, steps, features)` for
        `"channels_last"`, or `(batch_size, features, steps)` for
        `"channels_first"`.

    Output shape:
        3D tensor in the same layout with the steps dimension
        downsampled.

    Examples:

    >>> x = np.reshape(np.array([1., 2., 3., 4., 5.]), [1, 5, 1])
    >>> keras.layers.AveragePooling1D(pool_size=2, strides=1,
    ...                               padding="valid")(x)
    >>> keras.layers.AveragePooling1D(pool_size=2, strides=2,
    ...                               padding="valid")(x)
    >>> keras.layers.AveragePooling1D(pool_size=2, strides=1,
    ...                               padding="same")(x)
    """

    def __init__(
        self,
        pool_size,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # All pooling mechanics live in BasePooling; this subclass only
        # fixes the dimensionality (1) and the pooling mode ("average").
        super().__init__(
            pool_size=pool_size,
            strides=strides,
            pool_dimensions=1,
            pool_mode="average",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_average_pooling1d.py | keras/src/layers/pooling/adaptive_average_pooling1d.py | """Adaptive Average Pooling 1D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling1D")
class AdaptiveAveragePooling1D(BaseAdaptiveAveragePooling):
    """Adaptive average pooling operation for 1D temporal or spatial data.

    Unlike fixed-kernel pooling, the window size and stride are derived
    from the input length so that the output always has exactly
    `output_size` steps, whatever the input length is.

    Args:
        output_size: Integer (or length-1 tuple/list) giving the target
            output length.
        data_format: string, either `"channels_last"` (inputs of shape
            `(batch, length, channels)`) or `"channels_first"` (inputs
            of shape `(batch, channels, length)`). Defaults to the value
            in your Keras config at `~/.keras/keras.json`; if never set,
            `"channels_last"` is used.

    Input shape:
        - `"channels_last"`: 3D tensor `(batch_size, length, channels)`
        - `"channels_first"`: 3D tensor `(batch_size, channels, length)`

    Output shape:
        Same layout with the length dimension replaced by
        `output_size`.

    Examples:

    >>> import numpy as np
    >>> input_seq = np.random.rand(1, 64, 3)
    >>> layer = AdaptiveAveragePooling1D(output_size=32)
    >>> layer(input_seq).shape
    (1, 32, 3)
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        # Normalize `output_size` to a 1-tuple, rejecting any other form.
        if isinstance(output_size, int):
            normalized = (output_size,)
        elif isinstance(output_size, (tuple, list)):
            normalized = tuple(output_size)
            if len(normalized) != 1:
                raise ValueError(
                    f"For 1D input, `output_size` tuple must have length 1. "
                    f"Received: {output_size}"
                )
        else:
            raise TypeError(
                f"`output_size` must be an integer or tuple of 1 integer. "
                f"Received: {output_size} of type {type(output_size)}"
            )
        super().__init__(normalized, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_pooling2d_test.py | keras/src/layers/pooling/adaptive_pooling2d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
# Backends that do not support adaptive pooling; the module-level
# `pytestmark` below skips every test in this file on them.
SKIP_BACKENDS = ["openvino"]
pytestmark = pytest.mark.skipif(
    backend.backend() in SKIP_BACKENDS,
    reason=(
        "Adaptive pooling tests not supported for backend: {}".format(
            backend.backend()
        )
    ),
)
class AdaptivePooling2DLayerTest(testing.TestCase):
    """Tests for AdaptiveAveragePooling2D and AdaptiveMaxPooling2D."""
    def _run_layer_test(self, layer_class, x_np, output_size, data_format):
        """Helper: test layer output shape matches compute_output_shape()."""
        layer = layer_class(output_size=output_size, data_format=data_format)
        y = layer(x_np)
        expected_shape = layer.compute_output_shape(x_np.shape)
        self.assertEqual(y.shape, expected_shape)
    def test_average_pooling_basic_shapes(self):
        """Test AdaptiveAveragePooling2D basic shape transformation."""
        shape = (2, 3, 8, 8)  # N,C,H,W
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling2D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_max_pooling_basic_shapes(self):
        """Test AdaptiveMaxPooling2D basic shape transformation."""
        shape = (2, 3, 8, 8)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling2D,
            x,
            output_size=4,
            data_format="channels_first",
        )
    def test_average_pooling_channels_last(self):
        """Test AdaptiveAveragePooling2D with channels_last format."""
        shape = (2, 8, 8, 3)  # N,H,W,C
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling2D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_max_pooling_channels_last(self):
        """Test AdaptiveMaxPooling2D with channels_last format."""
        shape = (2, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling2D,
            x,
            output_size=4,
            data_format="channels_last",
        )
    def test_average_pooling_tuple_output_size(self):
        """Test AdaptiveAveragePooling2D with tuple output_size."""
        shape = (2, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveAveragePooling2D,
            x,
            output_size=(4, 4),
            data_format="channels_last",
        )
    def test_max_pooling_tuple_output_size(self):
        """Test AdaptiveMaxPooling2D with tuple output_size."""
        # Non-square target exercises independent H/W output sizes.
        shape = (2, 8, 8, 3)
        x = np.random.randn(*shape).astype("float32")
        self._run_layer_test(
            layers.AdaptiveMaxPooling2D,
            x,
            output_size=(2, 4),
            data_format="channels_last",
        )
    def test_average_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveAveragePooling2D."""
        layer = layers.AdaptiveAveragePooling2D(
            output_size=16, data_format="channels_last"
        )
        # Unknown (None) batch dimension must pass through unchanged.
        input_shape = (None, 64, 64, 3)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (None, 16, 16, 3))
    def test_max_pooling_compute_output_shape(self):
        """Test compute_output_shape() for AdaptiveMaxPooling2D."""
        layer = layers.AdaptiveMaxPooling2D(
            output_size=(8, 16), data_format="channels_first"
        )
        input_shape = (2, 3, 64, 64)
        output_shape = layer.compute_output_shape(input_shape)
        self.assertEqual(output_shape, (2, 3, 8, 16))
    def test_average_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveAveragePooling2D."""
        layer = layers.AdaptiveAveragePooling2D(
            output_size=32, data_format="channels_first"
        )
        config = layer.get_config()
        # An int output_size is broadcast to a 2-tuple in the config.
        self.assertEqual(config["output_size"], (32, 32))
        self.assertEqual(config["data_format"], "channels_first")
    def test_max_pooling_get_config(self):
        """Test get_config() serialization for AdaptiveMaxPooling2D."""
        layer = layers.AdaptiveMaxPooling2D(
            output_size=(8, 16), data_format="channels_last"
        )
        config = layer.get_config()
        self.assertEqual(config["output_size"], (8, 16))
        self.assertEqual(config["data_format"], "channels_last")
    def test_average_pooling2d_numerical(self):
        """Test AdaptiveAveragePooling2D numerical correctness."""
        # 4x4 grid split into 2x2 quadrants; each output is the quadrant
        # mean: 3.5, 5.5, 11.5, 13.5.
        inputs = np.array(
            [
                [
                    [
                        [1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [9.0, 10.0, 11.0, 12.0],
                        [13.0, 14.0, 15.0, 16.0],
                    ]
                ]
            ],
            dtype="float32",
        )
        expected = np.array([[[[3.5, 5.5], [11.5, 13.5]]]], dtype="float32")
        layer = layers.AdaptiveAveragePooling2D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
    def test_max_pooling2d_numerical(self):
        """Test AdaptiveMaxPooling2D numerical correctness."""
        # 4x4 grid split into 2x2 quadrants; each output is the quadrant
        # maximum: 6, 8, 14, 16.
        inputs = np.array(
            [
                [
                    [
                        [1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [9.0, 10.0, 11.0, 12.0],
                        [13.0, 14.0, 15.0, 16.0],
                    ]
                ]
            ],
            dtype="float32",
        )
        expected = np.array([[[[6.0, 8.0], [14.0, 16.0]]]], dtype="float32")
        layer = layers.AdaptiveMaxPooling2D(
            output_size=2, data_format="channels_first"
        )
        outputs = layer(inputs)
        self.assertAllClose(outputs, expected, atol=1e-4)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/max_pooling_test.py | keras/src/layers/pooling/max_pooling_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras.src import layers
from keras.src import testing
def _same_padding(input_size, pool_size, stride):
if input_size % stride == 0:
return max(pool_size - stride, 0)
else:
return max(pool_size - (input_size % stride), 0)
def np_maxpool1d(x, pool_size, strides, padding, data_format):
    """NumPy reference implementation of 1D max pooling.

    Accepts the same arguments as `keras.layers.MaxPooling1D` and returns
    the pooled array; used as ground truth in the correctness tests.
    """
    # Normalize to channels_last internally.
    if data_format == "channels_first":
        x = x.swapaxes(1, 2)
    pool = pool_size[0] if isinstance(pool_size, (tuple, list)) else pool_size
    step = strides[0] if isinstance(strides, (tuple, list)) else strides
    if padding == "same":
        # Pad the steps axis with -inf so padded positions never win the max.
        pad = _same_padding(x.shape[1], pool, step)
        pad_width = [(0, 0)] * x.ndim
        pad_width[1] = (0, pad)
        x = np.pad(
            x, pad_width=pad_width, mode="constant", constant_values=-np.inf
        )
    batch, length, channels = x.shape
    out_len = (length - pool) // step + 1
    # Build a strided window view: (batch, out_len, channels, pool).
    window_shape = (batch, out_len, channels, pool)
    window_strides = (
        x.strides[0],
        step * x.strides[1],
        x.strides[2],
        x.strides[1],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.max(axis=3)
    if data_format == "channels_first":
        out = out.swapaxes(1, 2)
    return out
def np_maxpool2d(x, pool_size, strides, padding, data_format):
    """NumPy reference implementation of 2D max pooling.

    Mirrors `keras.layers.MaxPooling2D`; used as ground truth in the
    correctness tests.
    """
    # Normalize to channels_last internally.
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 1))
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    pool_h, pool_w = pool_size
    step_h, step_w = strides
    if padding == "same":
        # Pad with -inf so padded positions never win the max.
        pad_h = _same_padding(x.shape[1], pool_h, step_h)
        pad_w = _same_padding(x.shape[2], pool_w, step_w)
        pad_width = [(0, 0)] * x.ndim
        pad_width[1] = (0, pad_h)
        pad_width[2] = (0, pad_w)
        x = np.pad(
            x, pad_width=pad_width, mode="constant", constant_values=-np.inf
        )
    batch, height, width, channels = x.shape
    out_h = (height - pool_h) // step_h + 1
    out_w = (width - pool_w) // step_w + 1
    # Window view: (batch, out_h, out_w, channels, pool_h, pool_w).
    window_shape = (batch, out_h, out_w, channels, pool_h, pool_w)
    window_strides = (
        x.strides[0],
        step_h * x.strides[1],
        step_w * x.strides[2],
        x.strides[3],
        x.strides[1],
        x.strides[2],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.max(axis=(4, 5))
    if data_format == "channels_first":
        out = out.transpose((0, 3, 1, 2))
    return out
def np_maxpool3d(x, pool_size, strides, padding, data_format):
    """NumPy reference implementation of 3D max pooling.

    Mirrors `keras.layers.MaxPooling3D`; used as ground truth in the
    correctness tests.
    """
    # Normalize to channels_last internally.
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 4, 1))
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides, strides)
    pool_h, pool_w, pool_d = pool_size
    step_h, step_w, step_d = strides
    if padding == "same":
        # Pad with -inf so padded positions never win the max.
        pad_h = _same_padding(x.shape[1], pool_h, step_h)
        pad_w = _same_padding(x.shape[2], pool_w, step_w)
        pad_d = _same_padding(x.shape[3], pool_d, step_d)
        pad_width = [(0, 0)] * x.ndim
        pad_width[1] = (0, pad_h)
        pad_width[2] = (0, pad_w)
        pad_width[3] = (0, pad_d)
        x = np.pad(
            x, pad_width=pad_width, mode="constant", constant_values=-np.inf
        )
    batch, height, width, depth, channels = x.shape
    out_h = (height - pool_h) // step_h + 1
    out_w = (width - pool_w) // step_w + 1
    out_d = (depth - pool_d) // step_d + 1
    # Window view: (batch, out_h, out_w, out_d, channels, *pool_size).
    window_shape = (batch, out_h, out_w, out_d, channels, pool_h, pool_w, pool_d)
    window_strides = (
        x.strides[0],
        step_h * x.strides[1],
        step_w * x.strides[2],
        step_d * x.strides[3],
        x.strides[4],
        x.strides[1],
        x.strides[2],
        x.strides[3],
    )
    windows = as_strided(x, shape=window_shape, strides=window_strides)
    out = windows.max(axis=(5, 6, 7))
    if data_format == "channels_first":
        out = out.transpose((0, 4, 1, 2, 3))
    return out
@pytest.mark.requires_trainable_backend
class MaxPoolingBasicTest(testing.TestCase):
    """Shape-level checks for the MaxPooling1D/2D/3D layers."""

    def _run_shape_checks(
        self,
        layer_cls,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
        **extra_checks,
    ):
        # Shared driver: pooling layers have no weights and add no losses.
        self.run_layer_test(
            layer_cls,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            assert_built_after_instantiation=True,
            **extra_checks,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 4), (3, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 4), (3, 5, 4)),
        ((2,), (2,), "valid", "channels_last", (3, 5, 4), (3, 2, 4)),
    )
    def test_max_pooling1d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self._run_shape_checks(
            layers.MaxPooling1D,
            pool_size,
            strides,
            padding,
            data_format,
            input_shape,
            output_shape,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 5, 4), (3, 4, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 5, 4), (3, 5, 5, 4)),
        ((2, 3), (2, 2), "valid", "channels_last", (3, 5, 5, 4), (3, 2, 2, 4)),
    )
    def test_max_pooling2d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self._run_shape_checks(
            layers.MaxPooling2D,
            pool_size,
            strides,
            padding,
            data_format,
            input_shape,
            output_shape,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 5, 5, 4), (3, 4, 4, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 5, 5, 4), (3, 5, 5, 5, 4)),
        (
            (2, 3, 2),
            (2, 2, 1),
            "valid",
            "channels_last",
            (3, 5, 5, 5, 4),
            (3, 2, 2, 4, 4),
        ),
    )
    def test_max_pooling3d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self._run_shape_checks(
            layers.MaxPooling3D,
            pool_size,
            strides,
            padding,
            data_format,
            input_shape,
            output_shape,
            # Mixed precision is skipped: incomplete op support on tensorflow.
            run_mixed_precision_check=False,
        )
class MaxPoolingCorrectnessTest(testing.TestCase):
    """Numerical agreement between MaxPooling layers and NumPy references."""

    def _assert_matches_numpy(
        self,
        layer_cls,
        reference_fn,
        inputs,
        pool_size,
        strides,
        padding,
        data_format,
    ):
        # Run the Keras layer, then compare against the NumPy reference.
        layer = layer_cls(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        self.assertAllClose(
            outputs,
            reference_fn(inputs, pool_size, strides, padding, data_format),
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        (2, 1, "same", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2,), (2,), "valid", "channels_last"),
        ((2,), (2,), "valid", "channels_first"),
    )
    def test_max_pooling1d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
        self._assert_matches_numpy(
            layers.MaxPooling1D,
            np_maxpool1d,
            inputs,
            pool_size,
            strides,
            padding,
            data_format,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        ((2, 2), (2, 2), "same", "channels_last"),
        ((2, 2), (2, 2), "same", "channels_first"),
        ((2, 3), (3, 3), "same", "channels_last"),
    )
    def test_max_pooling2d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(100, dtype="float32").reshape((1, 5, 5, 4))
        self._assert_matches_numpy(
            layers.MaxPooling2D,
            np_maxpool2d,
            inputs,
            pool_size,
            strides,
            padding,
            data_format,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_last"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_first"),
    )
    def test_max_pooling3d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
        self._assert_matches_numpy(
            layers.MaxPooling3D,
            np_maxpool3d,
            inputs,
            pool_size,
            strides,
            padding,
            data_format,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_average_pooling2d.py | keras/src/layers/pooling/global_average_pooling2d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalAveragePooling2D",
        "keras.layers.GlobalAvgPool2D",
    ]
)
class GlobalAveragePooling2D(BaseGlobalPooling):
    """Global average pooling operation for 2D data.

    Averages over the two spatial dimensions, collapsing each feature map
    to a single value per channel.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width,
            channels)` while `"channels_first"` corresponds to inputs with
            shape `(batch, channels, height, width)`. Defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`; if never set, `"channels_last"`.
        keepdims: boolean. If `False` (default) the spatial dimensions are
            dropped from the output; if `True` they are kept with length 1,
            as in `np.mean(..., keepdims=True)`.

    Input shape:
        4D tensor, `(batch_size, height, width, channels)` for
        `"channels_last"` or `(batch_size, channels, height, width)` for
        `"channels_first"`.

    Output shape:
        `(batch_size, channels)` when `keepdims=False`; otherwise a 4D
        tensor with the spatial dimensions reduced to 1.

    Example:

    >>> x = np.random.rand(2, 4, 5, 3)
    >>> y = keras.layers.GlobalAveragePooling2D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=2,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2) for channels_last, (2, 3) otherwise.
        axes = [1, 2] if self.data_format == "channels_last" else [2, 3]
        return ops.mean(inputs, axis=axes, keepdims=self.keepdims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_average_pooling2d.py | keras/src/layers/pooling/adaptive_average_pooling2d.py | """Adaptive Average Pooling 2D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling2D")
class AdaptiveAveragePooling2D(BaseAdaptiveAveragePooling):
"""Adaptive average pooling operation for 2D spatial data.
This layer applies an adaptive average pooling operation, which pools the
input such that the output has a target spatial size specified by
`output_size`, regardless of the input spatial size. The kernel size
and stride are automatically computed to achieve the target output size.
Args:
output_size: Integer or tuple of 2 integers specifying the
target output size.
If an integer, the same value is used for both height and width.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 4D tensor
`(batch_size, height, width, channels)`
- If `data_format="channels_first"`: 4D tensor
`(batch_size, channels, height, width)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_height, output_width, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_height, output_width)`
Examples:
>>> import numpy as np
>>> input_img = np.random.rand(1, 64, 64, 3)
>>> layer = AdaptiveAveragePooling2D(output_size=32)
>>> output_img = layer(input_img)
>>> output_img.shape
(1, 32, 32, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size_tuple = (output_size, output_size)
elif isinstance(output_size, (tuple, list)) and len(output_size) == 2:
output_size_tuple = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or (height, width) tuple. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size_tuple, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_max_pooling2d.py | keras/src/layers/pooling/adaptive_max_pooling2d.py | """Adaptive Max Pooling 2D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling2D")
class AdaptiveMaxPooling2D(BaseAdaptiveMaxPooling):
"""Adaptive max pooling operation for 2D spatial data.
This layer applies an adaptive max pooling operation, which pools the
input such that the output has a target spatial size specified by
`output_size`, regardless of the input spatial size. The kernel size
and stride are automatically computed to achieve the target output size.
Args:
output_size: Integer or tuple of 2 integers specifying the
target output size.
If an integer, the same value is used for both height and width.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 4D tensor
`(batch_size, height, width, channels)`
- If `data_format="channels_first"`: 4D tensor
`(batch_size, channels, height, width)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_height, output_width, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_height, output_width)`
Examples:
>>> import numpy as np
>>> input_img = np.random.rand(1, 64, 64, 3)
>>> layer = AdaptiveMaxPooling2D(output_size=32)
>>> output_img = layer(input_img)
>>> output_img.shape
(1, 32, 32, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size_tuple = (output_size, output_size)
elif isinstance(output_size, (tuple, list)) and len(output_size) == 2:
output_size_tuple = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or (height, width) tuple. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size_tuple, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/base_global_pooling.py | keras/src/layers/pooling/base_global_pooling.py | from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
    """Base class shared by all global pooling layers.

    Stores the data format and `keepdims` flag, pins the expected input
    rank, and implements shape inference; subclasses provide the actual
    reduction in `call`.
    """

    def __init__(
        self, pool_dimensions, data_format=None, keepdims=False, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        self.keepdims = keepdims
        # Inputs are batch + `pool_dimensions` spatial dims + channel dim.
        self.input_spec = InputSpec(ndim=pool_dimensions + 2)
        self._build_at_init()

    def call(self, inputs):
        # Subclasses implement the reduction (mean, max, ...).
        raise NotImplementedError

    def compute_output_shape(self, input_shape):
        num_spatial = len(input_shape) - 2
        batch = input_shape[0]
        if self.data_format == "channels_last":
            channels = input_shape[-1]
            if self.keepdims:
                return (batch,) + (1,) * num_spatial + (channels,)
            return (batch, channels)
        # channels_first: channel axis comes right after batch.
        channels = input_shape[1]
        if self.keepdims:
            return (batch, channels) + (1,) * num_spatial
        return (batch, channels)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "data_format": self.data_format,
                "keepdims": self.keepdims,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_max_pooling2d.py | keras/src/layers/pooling/global_max_pooling2d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalMaxPooling2D",
        "keras.layers.GlobalMaxPool2D",
    ]
)
class GlobalMaxPooling2D(BaseGlobalPooling):
    """Global max pooling operation for 2D data.

    Takes the maximum over the two spatial dimensions, collapsing each
    feature map to a single value per channel.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width,
            channels)` while `"channels_first"` corresponds to inputs with
            shape `(batch, channels, height, width)`. Defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`; if never set, `"channels_last"`.
        keepdims: boolean. If `False` (default) the spatial dimensions are
            dropped from the output; if `True` they are kept with length 1,
            as in `np.max(..., keepdims=True)`.

    Input shape:
        4D tensor, `(batch_size, height, width, channels)` for
        `"channels_last"` or `(batch_size, channels, height, width)` for
        `"channels_first"`.

    Output shape:
        `(batch_size, channels)` when `keepdims=False`; otherwise a 4D
        tensor with the spatial dimensions reduced to 1.

    Example:

    >>> x = np.random.rand(2, 4, 5, 3)
    >>> y = keras.layers.GlobalMaxPooling2D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=2,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2) for channels_last, (2, 3) otherwise.
        axes = [1, 2] if self.data_format == "channels_last" else [2, 3]
        return ops.max(inputs, axis=axes, keepdims=self.keepdims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/average_pooling3d.py | keras/src/layers/pooling/average_pooling3d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
class AveragePooling3D(BasePooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the average value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_max_pooling1d.py | keras/src/layers/pooling/adaptive_max_pooling1d.py | """Adaptive Max Pooling 1D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling1D")
class AdaptiveMaxPooling1D(BaseAdaptiveMaxPooling):
"""Adaptive max pooling operation for 1D temporal or spatial data.
This layer applies an adaptive max pooling operation, which pools the
input such that the output has a target length specified by `output_size`,
regardless of the input length. The kernel size and stride are automatically
computed to achieve the target output size.
Args:
output_size: Integer specifying the target output length.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, length, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, length)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 3D tensor
`(batch_size, length, channels)`
- If `data_format="channels_first"`: 3D tensor
`(batch_size, channels, length)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_length, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_length)`
Examples:
>>> import numpy as np
>>> input_seq = np.random.rand(1, 64, 3)
>>> layer = AdaptiveMaxPooling1D(output_size=32)
>>> output_seq = layer(input_seq)
>>> output_seq.shape
(1, 32, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size = (output_size,)
elif isinstance(output_size, (tuple, list)):
if len(output_size) != 1:
raise ValueError(
f"For 1D input, `output_size` tuple must have length 1. "
f"Received: {output_size}"
)
output_size = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or tuple of 1 integer. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/__init__.py | keras/src/layers/pooling/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_average_pooling_test.py | keras/src/layers/pooling/global_average_pooling_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
@pytest.mark.requires_trainable_backend
class GlobalAveragePoolingBasicTest(testing.TestCase):
    """Shape-level checks for the GlobalAveragePooling1D/2D/3D layers."""

    def _run_shape_checks(
        self,
        layer_cls,
        data_format,
        keepdims,
        input_shape,
        output_shape,
        supports_masking,
    ):
        # Shared driver: the layers have no weights and add no losses.
        self.run_layer_test(
            layer_cls,
            init_kwargs={
                "data_format": data_format,
                "keepdims": keepdims,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=supports_masking,
            assert_built_after_instantiation=True,
        )

    @parameterized.parameters(
        ("channels_last", False, (3, 5, 4), (3, 4)),
        ("channels_last", True, (3, 5, 4), (3, 1, 4)),
        ("channels_first", False, (3, 5, 4), (3, 5)),
    )
    def test_global_average_pooling1d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        # Only the 1D variant supports masking.
        self._run_shape_checks(
            layers.GlobalAveragePooling1D,
            data_format,
            keepdims,
            input_shape,
            output_shape,
            supports_masking=True,
        )

    @parameterized.parameters(
        ("channels_last", False, (3, 5, 6, 4), (3, 4)),
        ("channels_last", True, (3, 5, 6, 4), (3, 1, 1, 4)),
        ("channels_first", False, (3, 5, 6, 4), (3, 5)),
    )
    def test_global_average_pooling2d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        self._run_shape_checks(
            layers.GlobalAveragePooling2D,
            data_format,
            keepdims,
            input_shape,
            output_shape,
            supports_masking=False,
        )

    @parameterized.parameters(
        ("channels_last", False, (3, 5, 6, 5, 4), (3, 4)),
        ("channels_last", True, (3, 5, 6, 5, 4), (3, 1, 1, 1, 4)),
        ("channels_first", False, (3, 5, 6, 5, 4), (3, 5)),
    )
    def test_global_average_pooling3d(
        self,
        data_format,
        keepdims,
        input_shape,
        output_shape,
    ):
        self._run_shape_checks(
            layers.GlobalAveragePooling3D,
            data_format,
            keepdims,
            input_shape,
            output_shape,
            supports_masking=False,
        )
class GlobalAveragePoolingCorrectnessTest(testing.TestCase):
    """Numerical agreement between the layers and NumPy references."""

    @parameterized.parameters(
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_average_pooling1d(self, data_format, keepdims):
        def np_gap1d(x, data_format, keepdims, mask=None):
            """NumPy reference for GlobalAveragePooling1D (optionally masked)."""
            steps_axis = 1 if data_format == "channels_last" else 2
            if mask is not None:
                mask = np.expand_dims(
                    mask, 2 if data_format == "channels_last" else 1
                )
                # Out-of-place multiply: the previous `x *= mask` mutated
                # the caller's shared `inputs` array in place, which would
                # corrupt any later use of it in this test.
                x = x * mask
                # Masked mean: sum of kept steps / number of kept steps.
                res = np.sum(x, axis=steps_axis) / np.sum(mask, axis=steps_axis)
            else:
                res = np.mean(x, axis=steps_axis)
            if keepdims:
                res = np.expand_dims(res, axis=steps_axis)
            return res

        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
        layer = layers.GlobalAveragePooling1D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_gap1d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)

        # Masked case: masked-out steps must not contribute to the average.
        if data_format == "channels_last":
            mask = np.array([[1, 1, 0], [0, 1, 0]], dtype="int32")
        else:
            mask = np.array([[1, 1, 0, 0], [0, 1, 0, 1]], dtype="int32")
        outputs = layer(inputs, mask)
        expected = np_gap1d(inputs, data_format, keepdims, mask)
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_average_pooling2d(self, data_format, keepdims):
        def np_gap2d(x, data_format, keepdims):
            """NumPy reference for GlobalAveragePooling2D."""
            steps_axis = [1, 2] if data_format == "channels_last" else [2, 3]
            res = np.apply_over_axes(np.mean, x, steps_axis)
            if not keepdims:
                res = res.squeeze()
            return res

        inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4))
        layer = layers.GlobalAveragePooling2D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_gap2d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        ("channels_last", False),
        ("channels_last", True),
        ("channels_first", False),
        ("channels_first", True),
    )
    def test_global_average_pooling3d(self, data_format, keepdims):
        def np_gap3d(x, data_format, keepdims):
            """NumPy reference for GlobalAveragePooling3D."""
            steps_axis = (
                [1, 2, 3] if data_format == "channels_last" else [2, 3, 4]
            )
            res = np.apply_over_axes(np.mean, x, steps_axis)
            if not keepdims:
                res = res.squeeze()
            return res

        inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4))
        layer = layers.GlobalAveragePooling3D(
            data_format=data_format,
            keepdims=keepdims,
        )
        outputs = layer(inputs)
        expected = np_gap3d(inputs, data_format, keepdims)
        self.assertAllClose(outputs, expected)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_average_pooling1d.py | keras/src/layers/pooling/global_average_pooling1d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalAveragePooling1D",
        "keras.layers.GlobalAvgPool1D",
    ]
)
class GlobalAveragePooling1D(BaseGlobalPooling):
    """Global average pooling operation for temporal data.

    Averages over the steps dimension; when a mask is supplied, masked
    steps are excluded from the average.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. Defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`; if never set, `"channels_last"`.
        keepdims: boolean. If `False` (default) the steps dimension is
            dropped from the output; if `True` it is kept with length 1,
            as in `np.mean(..., keepdims=True)`.

    Call arguments:
        inputs: A 3D tensor.
        mask: Binary tensor of shape `(batch_size, steps)` indicating
            whether a given step should be masked (excluded from the
            average).

    Input shape:
        3D tensor, `(batch_size, steps, features)` for `"channels_last"`
        or `(batch_size, features, steps)` for `"channels_first"`.

    Output shape:
        `(batch_size, features)` when `keepdims=False`; otherwise a 3D
        tensor with the steps dimension reduced to 1.

    Example:

    >>> x = np.random.rand(2, 3, 4)
    >>> y = keras.layers.GlobalAveragePooling1D()(x)
    >>> y.shape
    (2, 4)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=1,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )
        # This layer consumes masks (masked steps are excluded above).
        self.supports_masking = True

    def call(self, inputs, mask=None):
        steps_axis = 1 if self.data_format == "channels_last" else 2
        if mask is None:
            return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims)
        # Masked mean: zero out masked steps, then divide the step-sum by
        # the number of unmasked steps.
        mask = backend.cast(mask, inputs[0].dtype)
        mask = ops.expand_dims(
            mask, 2 if self.data_format == "channels_last" else 1
        )
        inputs *= mask
        total = ops.sum(inputs, axis=steps_axis, keepdims=self.keepdims)
        count = ops.sum(mask, axis=steps_axis, keepdims=self.keepdims)
        return total / count

    def compute_mask(self, inputs, mask=None):
        # The steps dimension is reduced away, so no mask propagates.
        return None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_max_pooling3d.py | keras/src/layers/pooling/global_max_pooling3d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalMaxPooling3D",
        "keras.layers.GlobalMaxPool3D",
    ]
)
class GlobalMaxPooling3D(BaseGlobalPooling):
    """Global max pooling operation for 3D data.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`. If you never set it, then it
            will be `"channels_last"`.
        keepdims: A boolean, whether to keep the spatial dimensions or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            spatial dimensions are retained with length 1.
            The behavior is the same as for `tf.reduce_max` or `np.max`.

    Input shape:

    - If `data_format='channels_last'`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format='channels_first'`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:

    - If `keepdims=False`:
        2D tensor with shape `(batch_size, channels)`.
    - If `keepdims=True`:
        - If `data_format="channels_last"`:
            5D tensor with shape `(batch_size, 1, 1, 1, channels)`
        - If `data_format="channels_first"`:
            5D tensor with shape `(batch_size, channels, 1, 1, 1)`

    Example:

    >>> x = np.random.rand(2, 4, 5, 4, 3)
    >>> y = keras.layers.GlobalMaxPooling3D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=3,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Reduce over the three spatial axes; their position depends on
        # the data layout.
        if self.data_format == "channels_last":
            return ops.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
        return ops.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/adaptive_average_pooling3d.py | keras/src/layers/pooling/adaptive_average_pooling3d.py | """Adaptive Average Pooling 3D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling3D")
class AdaptiveAveragePooling3D(BaseAdaptiveAveragePooling):
    """Adaptive average pooling operation for 3D volumetric data.

    Pools the input so that the output reaches the target spatial size
    given by `output_size`, regardless of the input's spatial size. The
    pooling window size and stride are computed automatically from the
    requested output size.

    Args:
        output_size: Integer or tuple of 3 integers specifying the target
            output size. A single integer is used for depth, height, and
            width alike.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, depth, height, width, channels)`.
            `"channels_first"` corresponds to inputs with shape
            `(batch, channels, depth, height, width)`.
            Defaults to the value found in your Keras config file at
            `~/.keras/keras.json`. If never set, `"channels_last"` is used.

    Input shape:

    - If `data_format="channels_last"`: 5D tensor
        `(batch_size, depth, height, width, channels)`
    - If `data_format="channels_first"`: 5D tensor
        `(batch_size, channels, depth, height, width)`

    Output shape:

    - If `data_format="channels_last"`:
        `(batch_size, output_depth, output_height, output_width, channels)`
    - If `data_format="channels_first"`:
        `(batch_size, channels, output_depth, output_height, output_width)`

    Examples:

    >>> import numpy as np
    >>> input_vol = np.random.rand(1, 32, 32, 32, 3)
    >>> layer = AdaptiveAveragePooling3D(output_size=16)
    >>> output_vol = layer(input_vol)
    >>> output_vol.shape
    (1, 16, 16, 16, 3)
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        # Normalize `output_size` to a (depth, height, width) tuple before
        # handing it to the shared adaptive-pooling base class.
        if isinstance(output_size, int):
            normalized = (output_size,) * 3
        elif isinstance(output_size, (tuple, list)) and len(output_size) == 3:
            normalized = tuple(output_size)
        else:
            raise TypeError(
                "`output_size` must be an integer or "
                "(depth, height, width) tuple. "
                f"Received: {output_size} of type {type(output_size)}"
            )
        super().__init__(normalized, data_format, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_max_pooling1d.py | keras/src/layers/pooling/global_max_pooling1d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalMaxPooling1D",
        "keras.layers.GlobalMaxPool1D",
    ]
)
class GlobalMaxPooling1D(BaseGlobalPooling):
    """Global max pooling operation for temporal data.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        keepdims: A boolean, whether to keep the temporal dimension or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            temporal dimension is retained with length 1.
            The behavior is the same as for `tf.reduce_max` or `np.max`.

    Input shape:

    - If `data_format='channels_last'`:
        3D tensor with shape:
        `(batch_size, steps, features)`
    - If `data_format='channels_first'`:
        3D tensor with shape:
        `(batch_size, features, steps)`

    Output shape:

    - If `keepdims=False`:
        2D tensor with shape `(batch_size, features)`.
    - If `keepdims=True`:
        - If `data_format="channels_last"`:
            3D tensor with shape `(batch_size, 1, features)`
        - If `data_format="channels_first"`:
            3D tensor with shape `(batch_size, features, 1)`

    Example:

    >>> x = np.random.rand(2, 3, 4)
    >>> y = keras.layers.GlobalMaxPooling1D()(x)
    >>> y.shape
    (2, 4)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=1,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # The position of the steps axis depends on the data layout.
        steps_axis = 1 if self.data_format == "channels_last" else 2
        return ops.max(inputs, axis=steps_axis, keepdims=self.keepdims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/global_average_pooling3d.py | keras/src/layers/pooling/global_average_pooling3d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
    [
        "keras.layers.GlobalAveragePooling3D",
        "keras.layers.GlobalAvgPool3D",
    ]
)
class GlobalAveragePooling3D(BaseGlobalPooling):
    """Global average pooling operation for 3D data.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`. If you never set it, then it
            will be `"channels_last"`.
        keepdims: A boolean, whether to keep the spatial dimensions or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            spatial dimensions are retained with length 1.
            The behavior is the same as for `tf.reduce_mean` or `np.mean`.

    Input shape:

    - If `data_format='channels_last'`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format='channels_first'`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:

    - If `keepdims=False`:
        2D tensor with shape `(batch_size, channels)`.
    - If `keepdims=True`:
        - If `data_format="channels_last"`:
            5D tensor with shape `(batch_size, 1, 1, 1, channels)`
        - If `data_format="channels_first"`:
            5D tensor with shape `(batch_size, channels, 1, 1, 1)`

    Example:

    >>> x = np.random.rand(2, 4, 5, 4, 3)
    >>> y = keras.layers.GlobalAveragePooling3D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=3,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # The three spatial axes sit after the batch axis for
        # channels_last, or after batch + channels for channels_first.
        spatial_axes = (
            [1, 2, 3] if self.data_format == "channels_last" else [2, 3, 4]
        )
        return ops.mean(inputs, axis=spatial_axes, keepdims=self.keepdims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/max_pooling2d.py | keras/src/layers/pooling/max_pooling2d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"])
class MaxPooling2D(BasePooling):
    """Max pooling operation for 2D spatial data.

    Downsamples the input along its spatial dimensions (height and width)
    by taking the maximum value over an input window (of size defined by
    `pool_size`) for each channel of the input. The window is shifted by
    `strides` along each dimension.

    With the `"valid"` padding option, the output has a spatial shape
    (number of rows or columns) of:
    `output_shape = math.floor((input_shape - pool_size) / strides) + 1`
    (when `input_shape >= pool_size`)

    With the `"same"` padding option, the output shape is:
    `output_shape = math.floor((input_shape - 1) / strides) + 1`

    Args:
        pool_size: int or tuple of 2 integers, factors by which to downscale
            (dim1, dim2). If only one integer is specified, the same
            window length will be used for all dimensions.
        strides: int or tuple of 2 integers, or None. Strides values. If
            None, it will default to `pool_size`. If only one int is
            specified, the same stride size will be used for all dimensions.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly
            to the left/right or up/down of the input such that output has
            the same height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width,
            channels)` while `"channels_first"` corresponds to inputs with
            shape `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.

    Input shape:

    - If `data_format="channels_last"`:
        4D tensor with shape `(batch_size, height, width, channels)`.
    - If `data_format="channels_first"`:
        4D tensor with shape `(batch_size, channels, height, width)`.

    Output shape:

    - If `data_format="channels_last"`:
        4D tensor with shape
        `(batch_size, pooled_height, pooled_width, channels)`.
    - If `data_format="channels_first"`:
        4D tensor with shape
        `(batch_size, channels, pooled_height, pooled_width)`.

    Examples:

    `strides=(1, 1)` and `padding="valid"`:

    >>> x = np.array([[1., 2., 3.],
    ...               [4., 5., 6.],
    ...               [7., 8., 9.]])
    >>> x = np.reshape(x, [1, 3, 3, 1])
    >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
    ...     strides=(1, 1), padding="valid")
    >>> max_pool_2d(x)

    `strides=(2, 2)` and `padding="valid"`:

    >>> x = np.array([[1., 2., 3., 4.],
    ...               [5., 6., 7., 8.],
    ...               [9., 10., 11., 12.]])
    >>> x = np.reshape(x, [1, 3, 4, 1])
    >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
    ...     strides=(2, 2), padding="valid")
    >>> max_pool_2d(x)

    `strides=(1, 1)` and `padding="same"`:

    >>> x = np.array([[1., 2., 3.],
    ...               [4., 5., 6.],
    ...               [7., 8., 9.]])
    >>> x = np.reshape(x, [1, 3, 3, 1])
    >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
    ...     strides=(1, 1), padding="same")
    >>> max_pool_2d(x)
    """

    def __init__(
        self,
        pool_size=(2, 2),
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # All the work happens in the shared pooling base; this subclass
        # only pins the dimensionality and the pooling mode.
        super().__init__(
            pool_size=pool_size,
            strides=strides,
            pool_dimensions=2,
            pool_mode="max",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/base_adaptive_pooling.py | keras/src/layers/pooling/base_adaptive_pooling.py | """Base classes for adaptive pooling layers."""
from keras.src import ops
from keras.src.backend import config
from keras.src.layers.layer import Layer
class BaseAdaptivePooling(Layer):
    """Common plumbing for the adaptive pooling layer family.

    Subclasses only supply `call`; this class stores the normalized
    `output_size`, resolves and validates `data_format`, and implements
    shape inference and serialization.
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        """Initialize the base adaptive pooling layer.

        Args:
            output_size: Normalized spatial output size as a tuple
                (for example, `(32,)`, `(32, 32)`, or `(32, 32, 32)`).
            data_format: Either `"channels_last"` or `"channels_first"`;
                falls back to the global Keras image data format when
                `None`.
            **kwargs: Additional layer keyword arguments.
        """
        super().__init__(**kwargs)
        self.output_size = output_size
        self.data_format = data_format or config.image_data_format()
        if self.data_format not in ("channels_first", "channels_last"):
            raise ValueError(
                f"Invalid data_format: {self.data_format}. "
                "Expected 'channels_first' or 'channels_last'."
            )

    def compute_output_shape(self, input_shape):
        """Return the static output shape produced by pooling."""
        batch = input_shape[0]
        if self.data_format == "channels_first":
            # Channels sit right after the batch axis.
            return (batch, input_shape[1], *self.output_size)
        # channels_last: channels trail the spatial axes.
        return (batch, *self.output_size, input_shape[-1])

    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        return {
            **super().get_config(),
            "output_size": self.output_size,
            "data_format": self.data_format,
        }
class BaseAdaptiveAveragePooling(BaseAdaptivePooling):
    """Shared `call` implementation for adaptive average pooling (1D-3D)."""

    def call(self, inputs):
        # The backend op derives per-window extents from `output_size`.
        pooled = ops.adaptive_average_pool(
            inputs,
            output_size=self.output_size,
            data_format=self.data_format,
        )
        return pooled
class BaseAdaptiveMaxPooling(BaseAdaptivePooling):
    """Shared `call` implementation for adaptive max pooling (1D-3D)."""

    def call(self, inputs):
        # The backend op derives per-window extents from `output_size`.
        pooled = ops.adaptive_max_pool(
            inputs,
            output_size=self.output_size,
            data_format=self.data_format,
        )
        return pooled
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/pooling/max_pooling3d.py | keras/src/layers/pooling/max_pooling3d.py | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
class MaxPooling3D(BasePooling):
    """Max pooling operation for 3D data (spatial or spatio-temporal).

    Downsamples the input along its three spatial dimensions (depth,
    height, and width) by taking the maximum value over an input window
    (of size defined by `pool_size`) for each channel of the input. The
    window is shifted by `strides` along each dimension.

    Args:
        pool_size: int or tuple of 3 integers, factors by which to
            downscale (dim1, dim2, dim3). If only one integer is specified,
            the same window length will be used for all dimensions.
        strides: int or tuple of 3 integers, or None. Strides values. If
            None, it will default to `pool_size`. If only one int is
            specified, the same stride size will be used for all
            dimensions.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly
            to the left/right or up/down of the input such that output has
            the same height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json`. If you never set
            it, then it will be `"channels_last"`.

    Input shape:

    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:

    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`

    Example:

    ```python
    depth = 30
    height = 30
    width = 30
    channels = 3

    inputs = keras.layers.Input(shape=(depth, height, width, channels))
    layer = keras.layers.MaxPooling3D(pool_size=3)
    outputs = layer(inputs)  # Shape: (batch_size, 10, 10, 10, 3)
    ```
    """

    def __init__(
        self,
        pool_size=(2, 2, 2),
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # All the work happens in the shared pooling base; this subclass
        # only pins the dimensionality and the pooling mode.
        super().__init__(
            pool_size=pool_size,
            strides=strides,
            pool_dimensions=3,
            pool_mode="max",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/time_distributed.py | keras/src/layers/rnn/time_distributed.py | """Wrapper layer to apply every temporal slice of an input."""
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.core.wrapper import Wrapper
from keras.src.layers.layer import Layer
@keras_export("keras.layers.TimeDistributed")
class TimeDistributed(Wrapper):
    """This wrapper allows to apply a layer to every temporal slice of an input.

    Every input should be at least 3D, and the dimension of index one of the
    first input will be considered to be the temporal dimension.

    Consider a batch of 32 video samples, where each sample is a 128x128 RGB
    image with `channels_last` data format, across 10 timesteps.
    The batch input shape is `(32, 10, 128, 128, 3)`.

    You can then use `TimeDistributed` to apply the same `Conv2D` layer to
    each of the 10 timesteps, independently:

    >>> inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32)
    >>> conv_2d_layer = layers.Conv2D(64, (3, 3))
    >>> outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
    >>> outputs.shape
    (32, 10, 126, 126, 64)

    Because `TimeDistributed` applies the same instance of `Conv2D` to each
    of the timestamps, the same set of weights are used at each timestamp.

    Args:
        layer: a `keras.layers.Layer` instance.

    Call arguments:
        inputs: Input tensor of shape (batch, time, ...) or nested tensors,
            and each of which has shape (batch, time, ...).
        training: Python boolean indicating whether the layer should behave
            in training mode or in inference mode. This argument is passed
            to the wrapped layer (only if the layer supports this argument).
        mask: Binary tensor of shape `(samples, timesteps)` indicating
            whether a given timestep should be masked. This argument is
            passed to the wrapped layer (only if the layer supports this
            argument).
    """

    def __init__(self, layer, **kwargs):
        if not isinstance(layer, Layer):
            raise ValueError(
                "Please initialize `TimeDistributed` layer with a "
                f"`keras.layers.Layer` instance. Received: {layer}"
            )
        super().__init__(layer, **kwargs)
        self.supports_masking = True

    def _get_child_input_shape(self, input_shape):
        # Drop the temporal axis (index 1) to obtain the per-timestep shape
        # the wrapped layer actually sees.
        if not isinstance(input_shape, (tuple, list)) or len(input_shape) < 3:
            raise ValueError(
                "`TimeDistributed` Layer should be passed an `input_shape` "
                f"with at least 3 dimensions, received: {input_shape}"
            )
        return (input_shape[0], *input_shape[2:])

    def compute_output_shape(self, input_shape):
        child_input_shape = self._get_child_input_shape(input_shape)
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        # Re-insert the temporal axis into the child's output shape.
        return (child_output_shape[0], input_shape[1], *child_output_shape[1:])

    def build(self, input_shape):
        child_input_shape = self._get_child_input_shape(input_shape)
        super().build(child_input_shape)

    def call(self, inputs, training=None, mask=None):
        input_shape = ops.shape(inputs)
        mask_shape = None if mask is None else ops.shape(mask)
        batch_size = input_shape[0]
        timesteps = input_shape[1]

        # For TF backend with graph mode and `partial_batch_size`, skip
        # evaluation of `batch_size` as it can be a `strided_slice` and
        # not a constant.
        if backend.backend() == "tensorflow":
            from keras.src.utils.module_utils import tensorflow as tf

            # Bug fix: `tf.executing_eagerly` must be *called*. The bare
            # function object is always truthy, so the previous
            # `not tf.executing_eagerly` was always `False` and this
            # validation branch could never run.
            if (
                not tf.executing_eagerly()
                and mask_shape is not None
                and mask_shape[1:2] != (timesteps,)
            ):
                raise ValueError(
                    "`TimeDistributed` Layer should be passed a `mask` of "
                    f"shape ({batch_size}, {timesteps}, ...), "
                    f"received: mask.shape={mask_shape}"
                )
        elif mask_shape is not None and mask_shape[:2] != (
            batch_size,
            timesteps,
        ):
            raise ValueError(
                "`TimeDistributed` Layer should be passed a `mask` of "
                f"shape ({batch_size}, {timesteps}, ...), "
                f"received: mask.shape={mask_shape}"
            )

        def time_distributed_transpose(data):
            """Swaps the timestep and batch dimensions of a tensor."""
            axes = [1, 0, *range(2, len(data.shape))]
            return ops.transpose(data, axes=axes)

        inputs = time_distributed_transpose(inputs)
        if mask is not None:
            mask = time_distributed_transpose(mask)

        def step_function(i):
            # Forward `mask`/`training` only when the wrapped layer's
            # `call` signature accepts them.
            kwargs = {}
            if self.layer._call_has_mask_arg and mask is not None:
                kwargs["mask"] = mask[i]
            if self.layer._call_has_training_arg:
                kwargs["training"] = training
            return self.layer.call(inputs[i], **kwargs)

        # Implementation #1: if the time axis is static, use a Python for
        # loop.
        if inputs.shape[0] is not None:
            outputs = ops.stack(
                [step_function(i) for i in range(inputs.shape[0])]
            )
            return time_distributed_transpose(outputs)

        # Implementation #2: use backend.vectorized_map.
        outputs = backend.vectorized_map(step_function, ops.arange(timesteps))
        return time_distributed_transpose(outputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/dropout_rnn_cell_test.py | keras/src/layers/rnn/dropout_rnn_cell_test.py | import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
class RNNCellWithDropout(layers.Layer, DropoutRNNCell):
    """Minimal RNN cell exercising the `DropoutRNNCell` mixin."""

    def __init__(
        self, units, dropout=0.5, recurrent_dropout=0.5, seed=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.units = units
        self.state_size = units
        self.dropout = dropout
        self.recurrent_dropout = recurrent_dropout
        self.seed = seed
        self.seed_generator = backend.random.SeedGenerator(seed)

    def build(self, input_shape):
        # All-ones weights make outputs deterministic for the tests.
        self.kernel = self.add_weight(
            name="kernel",
            shape=(input_shape[-1], self.units),
            initializer="ones",
        )
        self.recurrent_kernel = self.add_weight(
            name="recurrent_kernel",
            shape=(self.units, self.units),
            initializer="ones",
        )

    def call(self, inputs, states, training=False):
        # Input dropout mask is requested first, then the recurrent one,
        # matching the mixin's expected usage order.
        if training:
            inputs = inputs * self.get_dropout_mask(inputs)
        prev_output = states[0]
        feed = ops.matmul(inputs, self.kernel)
        if training:
            prev_output = prev_output * self.get_recurrent_dropout_mask(
                prev_output
            )
        output = feed + ops.matmul(prev_output, self.recurrent_kernel)
        return output, [output]
class DropoutRNNCellTest(testing.TestCase):
    def test_seed_tracking(self):
        """The seed generator must surface as a non-trainable variable."""
        cell = RNNCellWithDropout(3, seed=1337)
        self.assertEqual(len(cell.non_trainable_variables), 1)
        wrapper = layers.RNN(cell)
        self.assertEqual(len(wrapper.non_trainable_variables), 1)

    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        # Shared expectations for both the float32 and mixed-precision runs.
        common = dict(
            input_shape=(3, 2, 4),
            call_kwargs={"training": True},
            expected_output_shape=(3, 5),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_non_trainable_variables=1,
            supports_masking=True,
            run_mixed_precision_check=False,
        )
        self.run_layer_test(
            layers.RNN,
            init_kwargs={"cell": RNNCellWithDropout(5, seed=1337)},
            **common,
        )

        # manually set dtype to mixed_float16 to run mixed precision check
        mixed_check = True
        if backend.backend() == "torch":
            import torch

            mixed_check = torch.cuda.is_available()

        if mixed_check:
            self.run_layer_test(
                layers.RNN,
                init_kwargs={
                    "cell": RNNCellWithDropout(
                        5, seed=1337, dtype="mixed_float16"
                    ),
                    "dtype": "mixed_float16",
                },
                **common,
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/rnn_test.py | keras/src/layers/rnn/rnn_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class OneStateRNNCell(layers.Layer):
    """Toy cell with a single recurrent state and all-ones weights."""

    def __init__(self, units, state_size=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        # Any truthy `state_size` (int/list/tuple) is taken as-is.
        self.state_size = state_size or units

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name="kernel",
            shape=(input_shape[-1], self.units),
            initializer="ones",
        )
        self.recurrent_kernel = self.add_weight(
            name="recurrent_kernel",
            shape=(self.units, self.units),
            initializer="ones",
        )

    def call(self, inputs, states):
        feed = ops.matmul(inputs, self.kernel)
        recur = ops.matmul(states[0], self.recurrent_kernel)
        new_state = feed + recur
        return new_state, [new_state]
class TwoStatesRNNCell(layers.Layer):
    """Toy cell that carries two recurrent states.

    Each state has its own recurrent kernel; the cell output is the sum
    of the two per-state branches.
    """

    def __init__(self, units, state_size=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.state_size = state_size if state_size else [units, units]
        self.output_size = units

    def build(self, input_shape):
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="ones",
            name="kernel",
        )
        self.recurrent_kernel_1 = self.add_weight(
            shape=(self.units, self.units),
            initializer="ones",
            name="recurrent_kernel_1",
        )
        self.recurrent_kernel_2 = self.add_weight(
            shape=(self.units, self.units),
            initializer="ones",
            name="recurrent_kernel_2",
        )

    def call(self, inputs, states):
        prev_1 = states[0]
        # Bug fix: the second branch must read the *second* state;
        # previously both branches read `states[0]`, so
        # `recurrent_kernel_2` never saw its own state.
        prev_2 = states[1]
        h = ops.matmul(inputs, self.kernel)
        output_1 = h + ops.matmul(prev_1, self.recurrent_kernel_1)
        output_2 = h + ops.matmul(prev_2, self.recurrent_kernel_2)
        output = output_1 + output_2
        return output, [output_1, output_2]
class RNNTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": OneStateRNNCell(5, state_size=5)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": OneStateRNNCell(5, state_size=[5])},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": OneStateRNNCell(5, state_size=(5,))},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": OneStateRNNCell(5), "return_sequences": True},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": OneStateRNNCell(5),
"go_backwards": True,
"unroll": True,
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": TwoStatesRNNCell(5, state_size=[5, 5])},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": TwoStatesRNNCell(5, state_size=(5, 5))},
input_shape=(3, 2, 4),
expected_output_shape=(3, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": TwoStatesRNNCell(5), "return_sequences": True},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
)
def test_compute_output_shape_single_state(self):
sequence = np.ones((3, 4, 5))
layer = layers.RNN(OneStateRNNCell(8), return_sequences=False)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape, (3, 8))
layer = layers.RNN(OneStateRNNCell(8), return_sequences=True)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape, (3, 4, 8))
layer = layers.RNN(
OneStateRNNCell(8), return_sequences=False, return_state=True
)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape[0], (3, 8))
self.assertEqual(output_shape[1], (3, 8))
layer = layers.RNN(
OneStateRNNCell(8), return_sequences=True, return_state=True
)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape[0], (3, 4, 8))
self.assertEqual(output_shape[1], (3, 8))
def test_compute_output_shape_two_states(self):
sequence = np.ones((3, 4, 5))
layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=False)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape, (3, 8))
layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=True)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape, (3, 4, 8))
layer = layers.RNN(
TwoStatesRNNCell(8), return_sequences=False, return_state=True
)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape[0], (3, 8))
self.assertEqual(output_shape[1], (3, 8))
self.assertEqual(output_shape[2], (3, 8))
layer = layers.RNN(
TwoStatesRNNCell(8), return_sequences=True, return_state=True
)
output_shape = layer.compute_output_shape(sequence.shape)
self.assertEqual(output_shape[0], (3, 4, 8))
self.assertEqual(output_shape[1], (3, 8))
self.assertEqual(output_shape[2], (3, 8))
def test_dynamic_shapes(self):
sequence_shape = (None, None, 3)
layer = layers.RNN(OneStateRNNCell(8), return_sequences=False)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape, (None, 8))
layer = layers.RNN(OneStateRNNCell(8), return_sequences=True)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape, (None, None, 8))
layer = layers.RNN(
OneStateRNNCell(8), return_sequences=False, return_state=True
)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape[0], (None, 8))
self.assertEqual(output_shape[1], (None, 8))
layer = layers.RNN(
OneStateRNNCell(8), return_sequences=True, return_state=True
)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape[0], (None, None, 8))
self.assertEqual(output_shape[1], (None, 8))
layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=False)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape, (None, 8))
layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=True)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape, (None, None, 8))
layer = layers.RNN(
TwoStatesRNNCell(8), return_sequences=False, return_state=True
)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape[0], (None, 8))
self.assertEqual(output_shape[1], (None, 8))
self.assertEqual(output_shape[2], (None, 8))
layer = layers.RNN(
TwoStatesRNNCell(8), return_sequences=True, return_state=True
)
output_shape = layer.compute_output_shape(sequence_shape)
self.assertEqual(output_shape[0], (None, None, 8))
self.assertEqual(output_shape[1], (None, 8))
self.assertEqual(output_shape[2], (None, 8))
def test_forward_pass_single_state(self):
sequence = np.ones((1, 2, 3))
layer = layers.RNN(OneStateRNNCell(2), return_sequences=False)
output = layer(sequence)
self.assertAllClose(np.array([[9.0, 9.0]]), output)
layer = layers.RNN(OneStateRNNCell(2), return_sequences=True)
output = layer(sequence)
self.assertAllClose(np.array([[[3.0, 3.0], [9.0, 9.0]]]), output)
layer = layers.RNN(
OneStateRNNCell(2), return_sequences=False, return_state=True
)
output, state = layer(sequence)
self.assertAllClose(np.array([[9.0, 9.0]]), output)
self.assertAllClose(np.array([[9.0, 9.0]]), state)
layer = layers.RNN(
OneStateRNNCell(2), return_sequences=True, return_state=True
)
output, state = layer(sequence)
self.assertAllClose(np.array([[[3.0, 3.0], [9.0, 9.0]]]), output)
self.assertAllClose(np.array([[9.0, 9.0]]), state)
def test_forward_pass_two_states(self):
sequence = np.ones((1, 2, 3))
layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False)
output = layer(sequence)
self.assertAllClose(np.array([[18.0, 18.0]]), output)
layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=True)
output = layer(sequence)
self.assertAllClose(np.array([[[6.0, 6.0], [18.0, 18.0]]]), output)
layer = layers.RNN(
TwoStatesRNNCell(2), return_sequences=False, return_state=True
)
output, state1, state2 = layer(sequence)
self.assertAllClose(np.array([[18.0, 18.0]]), output)
self.assertAllClose(np.array([[9.0, 9.0]]), state1)
self.assertAllClose(np.array([[9.0, 9.0]]), state2)
layer = layers.RNN(
TwoStatesRNNCell(2), return_sequences=True, return_state=True
)
output, state1, state2 = layer(sequence)
self.assertAllClose(np.array([[[6.0, 6.0], [18.0, 18.0]]]), output)
self.assertAllClose(np.array([[9.0, 9.0]]), state1)
self.assertAllClose(np.array([[9.0, 9.0]]), state2)
def test_passing_initial_state_single_state(self):
sequence = np.ones((2, 3, 2))
state = np.ones((2, 2))
layer = layers.RNN(OneStateRNNCell(2), return_sequences=False)
output = layer(sequence, initial_state=state)
self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), output)
layer = layers.RNN(
OneStateRNNCell(2), return_sequences=False, return_state=True
)
output, state = layer(sequence, initial_state=state)
self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), output)
self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state)
def test_passing_initial_state_two_states(self):
sequence = np.ones((2, 3, 2))
state = [np.ones((2, 2)), np.ones((2, 2))]
layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False)
output = layer(sequence, initial_state=state)
self.assertAllClose(np.array([[44.0, 44.0], [44.0, 44.0]]), output)
layer = layers.RNN(
TwoStatesRNNCell(2), return_sequences=False, return_state=True
)
output, state_1, state_2 = layer(sequence, initial_state=state)
self.assertAllClose(np.array([[44.0, 44.0], [44.0, 44.0]]), output)
self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state_1)
self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state_2)
def test_statefulness_single_state(self):
sequence = np.ones((1, 2, 3))
layer = layers.RNN(OneStateRNNCell(2), stateful=True)
layer(sequence)
output = layer(sequence)
self.assertAllClose(np.array([[45.0, 45.0]]), output)
layer = layers.RNN(OneStateRNNCell(2), stateful=True, return_state=True)
layer(sequence)
output, state = layer(sequence)
self.assertAllClose(np.array([[45.0, 45.0]]), output)
self.assertAllClose(np.array([[45.0, 45.0]]), state)
def test_statefulness_two_states(self):
sequence = np.ones((1, 2, 3))
layer = layers.RNN(TwoStatesRNNCell(2), stateful=True)
layer(sequence)
output = layer(sequence)
self.assertAllClose(np.array([[90.0, 90.0]]), output)
layer = layers.RNN(
TwoStatesRNNCell(2), stateful=True, return_state=True
)
layer(sequence)
output, state_1, state_2 = layer(sequence)
self.assertAllClose(np.array([[90.0, 90.0]]), output)
self.assertAllClose(np.array([[45.0, 45.0]]), state_1)
self.assertAllClose(np.array([[45.0, 45.0]]), state_2)
    def test_go_backwards(self):
        """Forward pass with `go_backwards=True` (time-reversed input)."""
        sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
        layer = layers.RNN(OneStateRNNCell(2), go_backwards=True)
        # First call builds the (stateless) layer; it does not affect
        # the second call's result.
        layer(sequence)
        output = layer(sequence)
        self.assertAllClose(
            np.array([[202.0, 202.0], [538.0, 538.0]]),
            output,
            tpu_atol=1e-4,
            tpu_rtol=1e-4,
        )
        # NOTE(review): this second layer is built with `stateful=True`
        # but WITHOUT `go_backwards=True`, so it does not exercise the
        # feature this test is named after — looks like a copy/paste
        # from the statefulness tests. The golden values below match the
        # stateful double-call behavior; confirm intent before changing.
        layer = layers.RNN(OneStateRNNCell(2), stateful=True, return_state=True)
        layer(sequence)
        output, state = layer(sequence)
        self.assertAllClose(
            np.array([[954.0, 954.0], [3978.0, 3978.0]]),
            output,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )
        self.assertAllClose(
            np.array([[954.0, 954.0], [3978.0, 3978.0]]),
            state,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )
def test_serialization(self):
layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False)
self.run_class_serialization_test(layer)
layer = layers.RNN(OneStateRNNCell(2), return_sequences=False)
self.run_class_serialization_test(layer)
def test_stateful_batch_size_mismatch_raises(self):
from keras.src.models import Functional
batch_size = 4
timesteps = 5
features = 3
layer = layers.RNN(TwoStatesRNNCell(2), stateful=True)
inputs = layers.Input(
shape=(timesteps, features), batch_size=batch_size
)
model = Functional(inputs, layer(inputs))
# Call once with correct batch size
x = ops.random.uniform(shape=(batch_size, timesteps, features))
_ = model(x)
# Expect ValueError when called with incorrect batch size
with self.assertRaisesRegex(ValueError, "batch size"):
x_bad = ops.random.uniform(shape=(1, timesteps, features))
model(x_bad)
# TODO: test masking
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/gru_test.py | keras/src/layers/rnn/gru_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class GRUTest(testing.TestCase):
    """Tests for `keras.layers.GRU`.

    Numeric tests use constant initializers so outputs are deterministic
    and can be compared against precomputed golden values.
    """

    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        # Smoke tests: output shapes, weight counts, masking support.
        self.run_layer_test(
            layers.GRU,
            init_kwargs={"units": 3, "dropout": 0.5},
            input_shape=(3, 2, 4),
            call_kwargs={"training": True},
            expected_output_shape=(3, 3),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.GRU,
            init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
            input_shape=(3, 2, 4),
            call_kwargs={"training": True},
            expected_output_shape=(3, 3),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        # One regularizer per weight (kernel, recurrent, bias) -> 3 losses.
        self.run_layer_test(
            layers.GRU,
            init_kwargs={
                "units": 3,
                "return_sequences": True,
                "bias_regularizer": "l1",
                "kernel_regularizer": "l2",
                "recurrent_regularizer": "l2",
            },
            input_shape=(3, 2, 4),
            expected_output_shape=(3, 2, 3),
            expected_num_losses=3,
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
    @parameterized.parameters([1, 2])
    def test_correctness(self, implementation):
        # NOTE(review): `implementation` is parameterized but never used
        # in the body — presumably kept for legacy API parity; confirm.
        sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.5217289, 0.5217289, 0.5217289],
                    [0.6371659, 0.6371659, 0.6371659],
                    [0.39384964, 0.39384964, 0.3938496],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # go_backwards=True processes the sequence in reverse time order.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.24406259, 0.24406259, 0.24406259],
                    [0.611516, 0.611516, 0.611516],
                    [0.3928808, 0.3928808, 0.3928808],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # unroll=True must produce the same values as the default loop.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            unroll=True,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.5217289, 0.5217289, 0.5217289],
                    [0.6371659, 0.6371659, 0.6371659],
                    [0.39384964, 0.39384964, 0.3938496],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # reset_after=False selects the alternate GRU gate formulation.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            reset_after=False,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.51447755, 0.51447755, 0.51447755],
                    [0.6426879, 0.6426879, 0.6426879],
                    [0.40208298, 0.40208298, 0.40208298],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            use_bias=False,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.49988455, 0.49988455, 0.49988455],
                    [0.64701194, 0.64701194, 0.64701194],
                    [0.4103359, 0.4103359, 0.4103359],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    def test_statefulness(self):
        sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
        layer = layers.GRU(
            4,
            stateful=True,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        layer(sequence)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.29542392, 0.29542392, 0.29542392, 0.29542392],
                    [0.5885018, 0.5885018, 0.5885018, 0.5885018],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # After reset_state(), a fresh pair of calls must reproduce
        # exactly the same output as the first pair.
        layer.reset_state()
        layer(sequence)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.29542392, 0.29542392, 0.29542392, 0.29542392],
                    [0.5885018, 0.5885018, 0.5885018, 0.5885018],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    def test_pass_initial_state(self):
        sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
        initial_state = np.arange(4).reshape((2, 2)).astype("float32")
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.23774096, 0.33508456], [0.83659905, 1.0227708]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.13486053, 0.23261218], [0.78257304, 0.9691353]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    def test_pass_return_state(self):
        sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
        initial_state = np.arange(4).reshape((2, 2)).astype("float32")
        # Test with go_backwards=False
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_state=True,
        )
        output, state = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.23774096, 0.33508456], [0.83659905, 1.0227708]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # For GRU the returned state equals the last output.
        self.assertAllClose(
            output,
            state,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # Test with go_backwards=True
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_state=True,
            go_backwards=True,
        )
        output, state = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.13486053, 0.23261218], [0.78257304, 0.9691353]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        self.assertAllClose(
            output,
            state,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    def test_masking(self):
        sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
        mask = np.array([[True, True, False, True], [True, False, False, True]])
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            unroll=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array([[0.19393763, 0.19393763], [0.30818558, 0.30818558]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # With return_sequences, masked steps repeat the previous step's
        # output (see repeated rows in the golden values below).
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_sequences=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array(
                [
                    [0.03606692, 0.03606692],
                    [0.09497581, 0.09497581],
                    [0.09497581, 0.09497581],
                    [0.19393763, 0.19393763],
                ],
            ),
            output[0],
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        self.assertAllClose(
            np.array(
                [
                    [0.16051409, 0.16051409],
                    [0.16051409, 0.16051409],
                    [0.16051409, 0.16051409],
                    [0.30818558, 0.30818558],
                ],
            ),
            output[1],
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        # With zero_output_for_mask=True, masked steps emit zeros instead.
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_sequences=True,
            zero_output_for_mask=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array(
                [
                    [0.03606692, 0.03606692],
                    [0.09497581, 0.09497581],
                    [0.0, 0.0],
                    [0.19393763, 0.19393763],
                ],
            ),
            output[0],
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        self.assertAllClose(
            np.array(
                [
                    [0.16051409, 0.16051409],
                    [0.0, 0.0],
                    [0.0, 0.0],
                    [0.30818558, 0.30818558],
                ],
            ),
            output[1],
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array([[0.11669192, 0.11669192], [0.28380975, 0.28380975]]),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    def test_legacy_implementation_argument(self):
        # A config carrying the legacy `implementation` key must still
        # deserialize and produce the same output as the default layer.
        sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        config = layer.get_config()
        config["implementation"] = 0  # Add legacy argument
        layer = layers.GRU.from_config(config)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.5217289, 0.5217289, 0.5217289],
                    [0.6371659, 0.6371659, 0.6371659],
                    [0.39384964, 0.39384964, 0.3938496],
                ]
            ),
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Test only applicable to fixing a bug with symbolic batch size "
        "for TensorFlow backend.",
    )
    def test_stateful_with_symbolic_batch_size(self):
        # Build once with a concrete batch size, then call the layer
        # inside a tf.function whose signature has a symbolic (None)
        # batch dimension; the output shape must stay concrete.
        layer = layers.GRU(5, stateful=True)
        x_concrete = np.ones((2, 10, 10), dtype=np.float32)
        _ = layer(x_concrete, training=True)
        import tensorflow as tf
        @tf.function(
            input_signature=[
                tf.TensorSpec(shape=(None, 10, 10), dtype=tf.float32)
            ]
        )
        def f(x):
            return layer(x, training=True)
        y = f(x_concrete)
        self.assertEqual(y.shape, (2, 5))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm1d_test.py | keras/src/layers/rnn/conv_lstm1d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class ConvLSTM1DTest(testing.TestCase):
    """Tests for `keras.layers.ConvLSTM1D`."""

    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        # Input/output shapes depend on the active image data format.
        channels_last = backend.config.image_data_format() == "channels_last"
        self.run_layer_test(
            layers.ConvLSTM1D,
            init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
            input_shape=(3, 2, 4, 3) if channels_last else (3, 2, 3, 4),
            expected_output_shape=(3, 4, 5) if channels_last else (3, 5, 4),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        # "valid" padding shrinks the spatial dim: 8 -> 6 with kernel 3.
        self.run_layer_test(
            layers.ConvLSTM1D,
            init_kwargs={
                "filters": 5,
                "kernel_size": 3,
                "padding": "valid",
                "recurrent_dropout": 0.5,
            },
            input_shape=(3, 2, 8, 3) if channels_last else (3, 2, 3, 8),
            call_kwargs={"training": True},
            expected_output_shape=(3, 6, 5) if channels_last else (3, 5, 6),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.ConvLSTM1D,
            init_kwargs={
                "filters": 5,
                "kernel_size": 3,
                "padding": "valid",
                "return_sequences": True,
            },
            input_shape=(3, 2, 8, 3) if channels_last else (3, 2, 3, 8),
            expected_output_shape=(
                (3, 2, 6, 5) if channels_last else (3, 2, 5, 6)
            ),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
    def test_correctness(self):
        """Golden-value check with constant initializers."""
        sequence = np.arange(120).reshape((2, 3, 4, 5)).astype("float32") / 10
        expected_output = np.array(
            [
                [[0.40807986, 0.40807986], [0.46421072, 0.46421072]],
                [[0.80933154, 0.80933154], [0.8233646, 0.8233646]],
            ]
        )
        # Golden values are channels_last; transpose both input and
        # expectation when running channels_first.
        if backend.config.image_data_format() == "channels_first":
            sequence = sequence.transpose((0, 1, 3, 2))
            expected_output = expected_output.transpose((0, 2, 1))
        layer = layers.ConvLSTM1D(
            filters=2,
            kernel_size=3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence)
        self.assertAllClose(
            expected_output,
            output,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/stacked_rnn_cells.py | keras/src/layers/rnn/stacked_rnn_cells.py | from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.StackedRNNCells")
class StackedRNNCells(Layer):
    """Wrapper allowing a stack of RNN cells to behave as a single cell.
    Used to implement efficient stacked RNNs.
    Args:
        cells: List of RNN cell instances.
    Example:
    ```python
    batch_size = 3
    sentence_length = 5
    num_features = 2
    new_shape = (batch_size, sentence_length, num_features)
    x = np.reshape(np.arange(30), new_shape)
    rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]
    stacked_lstm = keras.layers.StackedRNNCells(rnn_cells)
    lstm_layer = keras.layers.RNN(stacked_lstm)
    result = lstm_layer(x)
    ```
    """
    def __init__(self, cells, **kwargs):
        super().__init__(**kwargs)
        # Validate the minimal cell interface up front: each cell must
        # be steppable (`call`) and expose its state structure
        # (`state_size`).
        for cell in cells:
            if "call" not in dir(cell):
                raise ValueError(
                    "All cells must have a `call` method. "
                    f"Received cell without a `call` method: {cell}"
                )
            if "state_size" not in dir(cell):
                raise ValueError(
                    "All cells must have a `state_size` attribute. "
                    f"Received cell without a `state_size`: {cell}"
                )
        self.cells = cells
    @property
    def state_size(self):
        """A list with each cell's `state_size`, in stack order."""
        return [c.state_size for c in self.cells]
    @property
    def output_size(self):
        """Output size of the last cell in the stack.

        Falls back to the last cell's (first) state size when the cell
        does not declare an explicit `output_size`.
        """
        if getattr(self.cells[-1], "output_size", None) is not None:
            return self.cells[-1].output_size
        elif isinstance(self.cells[-1].state_size, (list, tuple)):
            return self.cells[-1].state_size[0]
        else:
            return self.cells[-1].state_size
    def get_initial_state(self, batch_size=None):
        """Build the initial state: one entry per cell.

        Cells providing `get_initial_state` are deferred to; otherwise
        all-zeros states are created from their `state_size`.
        """
        initial_states = []
        for cell in self.cells:
            get_initial_state_fn = getattr(cell, "get_initial_state", None)
            if get_initial_state_fn:
                initial_states.append(
                    get_initial_state_fn(batch_size=batch_size)
                )
            else:
                # `state_size` may be a single int or a list of ints.
                if isinstance(cell.state_size, int):
                    initial_states.append(
                        ops.zeros(
                            (batch_size, cell.state_size),
                            dtype=self.compute_dtype,
                        )
                    )
                else:
                    initial_states.append(
                        [
                            ops.zeros((batch_size, d), dtype=self.compute_dtype)
                            for d in cell.state_size
                        ]
                    )
        return initial_states
    def call(self, inputs, states, training=False, **kwargs):
        """Run one time step through each cell, feeding each cell's
        output to the next; returns `(last_output, new_states)` where
        `new_states` mirrors the incoming `states` structure."""
        # Call the cells in order and store the returned states.
        new_states = []
        # NOTE(review): the loop variable rebinds the `states` argument.
        # This is safe only because `zip` captured the original sequence
        # before the body runs; renaming the loop variable would be
        # clearer.
        for cell, states in zip(self.cells, states):
            state_is_list = tree.is_nested(states)
            # Normalize to a list so cells always receive list states.
            states = list(states) if tree.is_nested(states) else [states]
            # Only forward `training` to cells whose call accepts it.
            if isinstance(cell, Layer) and cell._call_has_training_arg:
                kwargs["training"] = training
            else:
                kwargs.pop("training", None)
            cell_call_fn = cell.__call__ if callable(cell) else cell.call
            inputs, states = cell_call_fn(inputs, states, **kwargs)
            # Unwrap single-state cells back to a bare tensor.
            if len(states) == 1 and not state_is_list:
                states = states[0]
            new_states.append(states)
        if len(new_states) == 1:
            new_states = new_states[0]
        return inputs, new_states
    def build(self, input_shape):
        """Build each cell in order, threading each cell's output shape
        into the next cell's input shape."""
        for cell in self.cells:
            if isinstance(cell, Layer) and not cell.built:
                cell.build(input_shape)
                cell.built = True
            # Derive this cell's output dim to size the next cell's input.
            if getattr(cell, "output_size", None) is not None:
                output_dim = cell.output_size
            elif isinstance(cell.state_size, (list, tuple)):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            batch_size = tree.flatten(input_shape)[0]
            input_shape = (batch_size, output_dim)
    def get_config(self):
        """Serialize the wrapper, recursively serializing each cell."""
        cells = []
        for cell in self.cells:
            cells.append(serialization_lib.serialize_keras_object(cell))
        config = {"cells": cells}
        base_config = super().get_config()
        return {**base_config, **config}
    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Reconstruct the wrapper, deserializing each cell config."""
        cells = []
        for cell_config in config.pop("cells"):
            cells.append(
                serialization_lib.deserialize_keras_object(
                    cell_config, custom_objects=custom_objects
                )
            )
        return cls(cells, **config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm_test.py | keras/src/layers/rnn/conv_lstm_test.py | import numpy as np
from keras.src import backend
from keras.src import initializers
from keras.src import testing
from keras.src.layers.rnn.conv_lstm import ConvLSTM
from keras.src.layers.rnn.conv_lstm import ConvLSTMCell
class ConvLSTMCellTest(testing.TestCase):
    """Single-step tests for the internal `ConvLSTMCell`."""

    def test_correctness(self):
        # One cell step with explicit (h, c) states; outputs are
        # compared by scalar checksum rather than full tensors.
        x = np.arange(150).reshape((2, 5, 5, 3)).astype("float32") / 10
        s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
        s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
        if backend.config.image_data_format() == "channels_first":
            x = x.transpose((0, 3, 1, 2))
            s1 = s1.transpose((0, 3, 1, 2))
            s2 = s2.transpose((0, 3, 1, 2))
        layer = ConvLSTMCell(
            rank=2,
            filters=4,
            kernel_size=3,
            padding="same",
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
        )
        output = layer(x, [s1, s2])
        # output[0] is the step output, output[1] the new [h, c] states;
        # h equals the output (matching checksums below).
        checksum_0 = np.sum(backend.convert_to_numpy(output[0]))
        self.assertAllClose(checksum_0, 188.89502, tpu_atol=1e-4, tpu_rtol=1e-4)
        checksum_1 = np.sum(backend.convert_to_numpy(output[1][0]))
        self.assertAllClose(checksum_1, 188.89502, tpu_atol=1e-4, tpu_rtol=1e-4)
        checksum_2 = np.sum(backend.convert_to_numpy(output[1][1]))
        self.assertAllClose(checksum_2, 2170.444, tpu_atol=1e-4, tpu_rtol=1e-4)
class ConvLSTMTest(testing.TestCase):
    """Full-sequence tests for the internal `ConvLSTM` base layer."""

    def test_correctness(self):
        # Run a 3-step sequence with explicit initial [h, c] states and
        # compare the output by scalar checksum.
        x = np.arange(450).reshape((2, 3, 5, 5, 3)).astype("float32") / 100
        s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
        s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
        if backend.config.image_data_format() == "channels_first":
            x = x.transpose((0, 1, 4, 2, 3))
            s1 = s1.transpose((0, 3, 1, 2))
            s2 = s2.transpose((0, 3, 1, 2))
        layer = ConvLSTM(
            rank=2,
            filters=4,
            kernel_size=3,
            padding="same",
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
        )
        output = layer(x, initial_state=[s1, s2])
        output = backend.convert_to_numpy(output)
        self.assertAllClose(
            np.sum(output), 119.812454, tpu_atol=1e-3, tpu_rtol=1e-3
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/simple_rnn.py | keras/src/layers/rnn/simple_rnn.py | from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.rnn import RNN
@keras_export("keras.layers.SimpleRNNCell")
class SimpleRNNCell(Layer, DropoutRNNCell):
    """Cell class for SimpleRNN.
    This class processes one step within the whole time sequence input, whereas
    `keras.layer.SimpleRNN` processes the whole sequence.
    Args:
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use.
            Default: hyperbolic tangent (`tanh`).
            If you pass `None`, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, (default `True`), whether the layer
            should use a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs. Default:
            `"glorot_uniform"`.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation
            of the recurrent state. Default: `"orthogonal"`.
        bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state. Default: 0.
        seed: Random seed for dropout.
    Call arguments:
        sequence: A 2D tensor, with shape `(batch, features)`.
        states: A 2D tensor with shape `(batch, units)`, which is the state
            from the previous time step.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. Only relevant when `dropout` or
            `recurrent_dropout` is used.
    Example:
    ```python
    inputs = np.random.random([32, 10, 8]).astype(np.float32)
    rnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4))
    output = rnn(inputs)  # The output has shape `(32, 4)`.
    rnn = keras.layers.RNN(
        keras.layers.SimpleRNNCell(4),
        return_sequences=True,
        return_state=True
    )
    # whole_sequence_output has shape `(32, 10, 4)`.
    # final_state has shape `(32, 4)`.
    whole_sequence_output, final_state = rnn(inputs)
    ```
    """
    def __init__(
        self,
        units,
        activation="tanh",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        **kwargs,
    ):
        # Validate before calling super() so no partial layer is created.
        if units <= 0:
            raise ValueError(
                "Received an invalid value for argument `units`, "
                f"expected a positive integer, got {units}."
            )
        super().__init__(**kwargs)
        self.seed = seed
        self.seed_generator = backend.random.SeedGenerator(seed)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout rates into the valid [0, 1] range.
        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        # Single recurrent state of size `units`.
        self.state_size = self.units
        self.output_size = self.units
    def build(self, input_shape):
        """Create the input kernel, recurrent kernel, and optional bias."""
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            name="kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name="recurrent_kernel",
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.units,),
                name="bias",
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
    def call(self, sequence, states, training=False):
        """Run one step:
        `output = activation(x @ kernel + bias + prev @ recurrent_kernel)`.

        Returns `(output, new_state)`; `new_state` mirrors the structure
        (list vs. bare tensor) of the incoming `states`.
        """
        prev_output = states[0] if isinstance(states, (list, tuple)) else states
        dp_mask = self.get_dropout_mask(sequence)
        rec_dp_mask = self.get_recurrent_dropout_mask(prev_output)
        # Dropout masks are applied only in training mode.
        if training and dp_mask is not None:
            sequence = sequence * dp_mask
        h = ops.matmul(sequence, self.kernel)
        if self.bias is not None:
            h = ops.add(h, self.bias)
        if training and rec_dp_mask is not None:
            prev_output = prev_output * rec_dp_mask
        output = h + ops.matmul(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)
        new_state = [output] if isinstance(states, (list, tuple)) else output
        return output, new_state
    def get_initial_state(self, batch_size=None):
        """Return a single all-zeros state of shape `(batch_size, units)`."""
        return [
            ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype)
        ]
    def get_config(self):
        """Serialize all constructor arguments for round-tripping."""
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
@keras_export("keras.layers.SimpleRNN")
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back as the new input.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses
a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation
of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the
recurrent state. Default: 0.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state
in addition to the output. Default: `False`.
go_backwards: Boolean (default: `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default: `False`). If `True`, the last state
for each sample at index i in a batch will be used as the
initial state for the sample of index i in the following batch.
unroll: Boolean (default: `False`).
If `True`, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
sequence: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked. An individual `True` entry
indicates that the corresponding timestep should be utilized,
while a `False` entry indicates that the corresponding timestep
should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
This argument is passed to the cell when calling it.
This is only relevant if `dropout` or `recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Example:
```python
inputs = np.random.random((32, 10, 8))
simple_rnn = keras.layers.SimpleRNN(4)
output = simple_rnn(inputs) # The output has shape `(32, 4)`.
simple_rnn = keras.layers.SimpleRNN(
4, return_sequences=True, return_state=True
)
# whole_sequence_output has shape `(32, 10, 4)`.
# final_state has shape `(32, 4)`.
whole_sequence_output, final_state = simple_rnn(inputs)
```
"""
def __init__(
self,
units,
activation="tanh",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
seed=None,
**kwargs,
):
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
seed=seed,
dtype=kwargs.get("dtype", None),
trainable=kwargs.get("trainable", True),
name="simple_rnn_cell",
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs,
)
self.input_spec = [InputSpec(ndim=3)]
def call(self, sequences, initial_state=None, mask=None, training=False):
return super().call(
sequences, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
}
base_config = super().get_config()
del base_config["cell"]
return {**base_config, **config}
@classmethod
def from_config(cls, config):
return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.