repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/layer_test.py | keras/src/layers/layer_test.py | import pickle
from unittest import mock
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import Input
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import global_state
from keras.src.backend.common.remat import RematScope
from keras.src.models import Model
from keras.src.utils import traceback_utils
class MockRemat:
"""Mock remat by returning a wrapper Mock calling the original function"""
def __init__(self):
self.rematted_functions = {}
def __call__(self, func):
if func in self.rematted_functions:
return self.rematted_functions[func]
wrapped_func = mock.Mock(wraps=func)
self.rematted_functions[func] = wrapped_func
return wrapped_func
class LayerTest(testing.TestCase):
def test_compute_output_spec(self):
# Test that implementing compute_output_shape
# is enough to make compute_output_spec work.
# Case: single output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return input_shape
layer = TestLayer()
self.assertEqual(
layer.compute_output_spec(backend.KerasTensor((2, 3))).shape, (2, 3)
)
# Case: tuple output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return (input_shape, input_shape)
layer = TestLayer()
out = layer.compute_output_spec(backend.KerasTensor((2, 3)))
self.assertIsInstance(out, tuple)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (2, 3))
self.assertEqual(out[1].shape, (2, 3))
# Case: list output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return [input_shape, input_shape]
layer = TestLayer()
out = layer.compute_output_spec(backend.KerasTensor((2, 3)))
self.assertIsInstance(out, list)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (2, 3))
self.assertEqual(out[1].shape, (2, 3))
# Case: dict output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return {"1": input_shape, "2": input_shape}
layer = TestLayer()
out = layer.compute_output_spec(backend.KerasTensor((2, 3)))
self.assertIsInstance(out, dict)
self.assertEqual(len(out), 2)
self.assertEqual(out["1"].shape, (2, 3))
self.assertEqual(out["2"].shape, (2, 3))
# Case: nested tuple output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return (
input_shape,
(input_shape, input_shape),
(input_shape, input_shape),
)
layer = TestLayer()
out = layer.compute_output_spec(backend.KerasTensor((2, 3)))
self.assertIsInstance(out, tuple)
self.assertEqual(len(out), 3)
self.assertEqual(out[0].shape, (2, 3))
self.assertIsInstance(out[1], tuple)
self.assertEqual(len(out[1]), 2)
self.assertEqual(out[1][0].shape, (2, 3))
self.assertEqual(out[1][1].shape, (2, 3))
self.assertIsInstance(out[2], tuple)
self.assertEqual(len(out[2]), 2)
self.assertEqual(out[2][0].shape, (2, 3))
self.assertEqual(out[2][1].shape, (2, 3))
# Case: nested dict output
class TestLayer(layers.Layer):
def call(self, x):
assert False # Should never be called.
def compute_output_shape(self, input_shape):
return {
"1": input_shape,
"2": {"11": input_shape, "22": input_shape},
}
layer = TestLayer()
out = layer.compute_output_spec(backend.KerasTensor((2, 3)))
self.assertIsInstance(out, dict)
self.assertEqual(len(out), 2)
self.assertEqual(out["1"].shape, (2, 3))
self.assertIsInstance(out["2"], dict)
self.assertEqual(len(out["2"]), 2)
self.assertEqual(out["2"]["11"].shape, (2, 3))
self.assertEqual(out["2"]["22"].shape, (2, 3))
def test_positional_arg_error(self):
class SomeLayer(layers.Layer):
def call(self, x, bool_arg):
if bool_arg:
return x
return x + 1
x = backend.KerasTensor(shape=(2, 3), name="x")
with self.assertRaisesRegex(
ValueError, "Only input tensors may be passed as"
):
SomeLayer()(x, True)
# This works
SomeLayer()(x, bool_arg=True)
@parameterized.named_parameters(
("call", "call", None),
("compute_output_shape", "compute_output_shape", None),
(
"quantized_build",
"quantized_build",
{"input_shape": None, "mode": None},
),
("quantize", "quantize", {"mode": "int8"}),
("_int8_call", "_int8_call", None),
("_float8_call", "_float8_call", None),
)
def test_not_implemented_error(self, method, args):
layer = layers.Layer()
layer.built = True
with self.assertRaisesRegex(
NotImplementedError,
f"does not have a `{method}` method implemented.",
):
if isinstance(args, dict):
getattr(layer, method)(**args)
else:
getattr(layer, method)(args)
def test_layer_with_remat(self):
"""Test rematerialization on a simple layer."""
# Create a mock to track calls to remat
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
class SomeLayer(layers.Layer):
def call(self, x):
return x + 1
input_tensor = backend.random.uniform((2, 4))
layer = SomeLayer()
# Case 1: Without rematerialization
output_no_remat = layer(input_tensor)
# Case 2: With rematerialization
with RematScope(mode="full"):
layer = SomeLayer()
output_with_remat = layer(input_tensor)
# Assert outputs are the same
self.assertAllClose(output_no_remat, output_with_remat)
# Ensure remat was applied in the second case
self.assertLen(mock_remat.rematted_functions, 1)
next(iter(mock_remat.rematted_functions.values())).assert_called()
def test_quantized_layer_with_remat(self):
"""Test rematerialization on a quantized layer."""
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
input_tensor = backend.random.uniform((2, 4))
# Case 2: With rematerialization
with RematScope(mode="full"):
layer = layers.Dense(3)
layer.build((2, 4))
layer.quantize("float8")
layer(input_tensor)
# Ensure remat was applied
self.assertLen(mock_remat.rematted_functions, 1)
next(iter(mock_remat.rematted_functions.values())).assert_called()
def test_gptq_quantization_by_setting_dtype(self):
"""Tests error being raised when dtype is set to GPTQ."""
with self.assertRaisesRegex(
ValueError,
"Implicitly enabling GPTQ quantization.*is not supported",
):
layer = layers.Dense(3)
layer.build((2, 4))
layer.dtype_policy = "gptq/4/-1_from_float32"
def test_functional_model_with_remat(self):
if backend.backend() in ("openvino", "numpy"):
self.skipTest(
"remat is not supported in openvino and numpy backends."
)
traceback_utils.enable_traceback_filtering()
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
# Define model inputs
inputs = Input(shape=(32, 32, 3))
# just one layer in remat scope
with RematScope(mode="activations"):
layer = layers.Dense(64, activation="relu")
output = layer(layers.Flatten()(inputs))
# Build the functional model
model = Model(inputs=inputs, outputs=output)
# Compile the model
model.compile(optimizer="adam", loss="mse")
# Generate dummy data for testing
x_train = np.random.random((10, 32, 32, 3)).astype(np.float32)
y_train = np.random.random((10, 64)).astype(np.float32)
# Run training to ensure `RematScope` is applied correctly
model.fit(x_train, y_train, epochs=1, batch_size=2, verbose=0)
self.assertLen(mock_remat.rematted_functions, 1)
next(iter(mock_remat.rematted_functions.values())).assert_called()
def test_remat_wrapper_list_of_layers(self):
"""Test rematerialization using list_of_layers mode."""
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
class TestLayer(layers.Layer):
def call(self, x):
return x + 1
class OtherLayer(layers.Layer):
def call(self, x):
return x * 2
remat_layers = ["test_layer"]
input_tensor = backend.random.uniform((4, 4))
with RematScope(mode="list_of_layers", layer_names=remat_layers):
test_layer = TestLayer(name="test_layer")
other_layer = OtherLayer(name="other_layer")
output_test = test_layer(input_tensor)
output_other = other_layer(input_tensor)
self.assertAllClose(output_test, input_tensor + 1)
self.assertAllClose(output_other, input_tensor * 2)
# Ensure remat was applied to the correct layer
self.assertLen(mock_remat.rematted_functions, 1)
next(iter(mock_remat.rematted_functions.values())).assert_called()
def test_remat_larger_than_mode(self):
"""Test rematerialization using larger_than mode."""
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
class TestLayer(layers.Layer):
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
return x + 1
input_tensor = backend.random.uniform((100, 100)) # Large tensor
with RematScope(mode="larger_than", output_size_threshold=5000):
layer = TestLayer()
output = layer(input_tensor)
self.assertAllClose(output, input_tensor + 1)
# Ensure remat was applied
self.assertLen(mock_remat.rematted_functions, 1)
next(iter(mock_remat.rematted_functions.values())).assert_called()
def test_remat_larger_than_mode_high_threshold(self):
"""Test rematerialization using larger_than mode."""
mock_remat = MockRemat()
with mock.patch(
"keras.src.backend.common.remat.remat", wraps=mock_remat
):
class TestLayer(layers.Layer):
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
return x + 1
input_tensor = backend.random.uniform((100, 100)) # Large tensor
with RematScope(mode="larger_than", output_size_threshold=50000):
layer = TestLayer()
output = layer(input_tensor)
self.assertAllClose(output, input_tensor + 1)
# Ensure remat was not applied
self.assertLen(mock_remat.rematted_functions, 0)
def test_rng_seed_tracking(self):
class RNGLayer(layers.Layer):
def __init__(self):
super().__init__()
self.seed_gen = backend.random.SeedGenerator(seed=1337)
def call(self, x):
return x * backend.random.normal(x.shape, seed=self.seed_gen)
layer = RNGLayer()
self.assertEqual(layer.variables, [layer.seed_gen.state])
self.assertAllClose(layer.variables[0], [1337, 0])
layer(np.ones((3, 4)))
self.assertAllClose(layer.variables[0], [1337, 1])
# Test tracking in list attributes.
class RNGListLayer(layers.Layer):
def __init__(self):
super().__init__()
self.seed_gens = []
self.seed_gens.append(backend.random.SeedGenerator(seed=1))
self.seed_gens.append(backend.random.SeedGenerator(seed=10))
def call(self, x):
x = x * backend.random.normal(x.shape, seed=self.seed_gens[0])
x = x * backend.random.normal(x.shape, seed=self.seed_gens[1])
return x
layer = RNGListLayer()
self.assertEqual(
layer.variables,
[layer.seed_gens[0].state, layer.seed_gens[1].state],
)
self.assertAllClose(layer.variables[0], [1, 0])
self.assertAllClose(layer.variables[1], [10, 0])
layer(np.ones((3, 4)))
self.assertAllClose(layer.variables[0], [1, 1])
self.assertAllClose(layer.variables[1], [10, 1])
def test_layer_tracking(self):
class LayerWithDenseLayers(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense1 = layers.Dense(units)
self.layer_dict = {
"dense2": layers.Dense(units),
}
self.layer_list = [layers.Dense(units)]
self.units = units
self.seed_generator = backend.random.SeedGenerator(seed=1)
def build(self, input_shape):
self.layer_list.append(layers.Dense(self.units))
def call(self, x):
x = self.dense1(x)
x = self.layer_dict["dense2"](x)
x = self.layer_list[0](x)
x = self.layer_list[1](x)
return x
class ParentLayer(layers.Layer):
def __init__(self, inner_layer):
super().__init__()
self.inner_layer = inner_layer
def call(self, x):
return self.inner_layer(x)
layer = LayerWithDenseLayers(3)
layer.build((1, 3))
self.assertLen(layer._layers, 4)
layer(np.zeros((1, 3)))
self.assertLen(layer.variables, 9)
self.assertLen(layer.weights, 8)
layer = ParentLayer(LayerWithDenseLayers(3))
self.assertLen(layer._layers, 1)
layer(np.zeros((1, 3)))
self.assertLen(layer.variables, 9)
self.assertLen(layer.weights, 8)
layer = ParentLayer(ParentLayer(LayerWithDenseLayers(3)))
self.assertLen(layer._layers, 1)
layer(np.zeros((1, 3)))
self.assertLen(layer.variables, 9)
self.assertLen(layer.weights, 8)
def test_metric_tracking(self):
class LayerWithMetric(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense = layers.Dense(units)
self.metric = metrics.MeanSquaredError(name="my_metric")
def build(self, input_shape):
self.dense.build(input_shape)
def call(self, x):
return self.dense(x)
class ParentLayerWithMetric(layers.Layer):
def __init__(self, inner_layer):
super().__init__()
self.inner_layer = inner_layer
self.metric = metrics.MeanSquaredError(name="my_metric")
def build(self, input_shape):
self.inner_layer.build(input_shape)
def call(self, x):
return self.inner_layer(x)
layer = LayerWithMetric(3)
layer.build((1, 3))
self.assertLen(layer.metrics, 1)
self.assertLen(layer.metrics_variables, 2)
self.assertLen(layer.trainable_variables, 2)
self.assertLen(layer.non_trainable_variables, 0)
layer = ParentLayerWithMetric(LayerWithMetric(3))
layer.build((1, 3))
self.assertLen(layer.metrics, 2)
self.assertLen(layer.metrics_variables, 4)
self.assertLen(layer.trainable_variables, 2)
self.assertLen(layer.non_trainable_variables, 0)
layer = ParentLayerWithMetric(ParentLayerWithMetric(LayerWithMetric(3)))
layer.build((1, 3))
self.assertLen(layer.metrics, 3)
self.assertLen(layer.metrics_variables, 6)
self.assertLen(layer.trainable_variables, 2)
self.assertLen(layer.non_trainable_variables, 0)
def test_build_on_call(self):
class LayerWithUnbuiltState(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense1 = layers.Dense(units)
def call(self, x):
return self.dense1(x)
layer = LayerWithUnbuiltState(2)
layer(backend.KerasTensor((3, 4)))
self.assertLen(layer.weights, 2)
class KwargsLayerWithUnbuiltState(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense1 = layers.Dense(units)
self.dense2 = layers.Dense(units)
def call(self, x1, x2):
return self.dense1(x1) + self.dense2(x2)
layer = KwargsLayerWithUnbuiltState(2)
layer(backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4)))
self.assertLen(layer.weights, 4)
layer = KwargsLayerWithUnbuiltState(2)
layer(x1=backend.KerasTensor((3, 4)), x2=backend.KerasTensor((3, 4)))
self.assertLen(layer.weights, 4)
class DictLayerWithUnbuiltState(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense = layers.Dense(units)
def call(self, xs):
result = self.dense(xs["x1"])
if xs.get("x2", None) is not None:
result += self.dense(xs["x2"])
return result
layer = DictLayerWithUnbuiltState(2)
layer(
{
"x1": backend.KerasTensor((3, 4)),
"x2": backend.KerasTensor((3, 4)),
}
)
self.assertLen(layer.weights, 2)
layer = DictLayerWithUnbuiltState(2)
layer({"x1": backend.KerasTensor((3, 4)), "x2": None})
self.assertLen(layer.weights, 2)
class ListLayerWithUnbuiltState(layers.Layer):
def __init__(self, units):
super().__init__()
self.dense = layers.Dense(units)
def call(self, xs):
result = self.dense(xs[0])
if xs[1] is not None:
result += self.dense(xs[1])
return result
layer = ListLayerWithUnbuiltState(2)
layer([backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))])
self.assertLen(layer.weights, 2)
layer = ListLayerWithUnbuiltState(2)
layer([backend.KerasTensor((3, 4)), None])
self.assertLen(layer.weights, 2)
def test_activity_regularization(self):
class ActivityRegularizer(layers.Layer):
def call(self, x):
return x
layer = ActivityRegularizer(activity_regularizer="l1")
layer(np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 0.01)
# losses are reset upon call
layer(np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 0.01)
# KerasTensors are no op
layer = ActivityRegularizer(activity_regularizer="l1")
layer(layers.Input(batch_shape=(2, 2)))
self.assertLen(layer.losses, 0)
@pytest.mark.requires_trainable_backend
def test_add_loss(self):
class LossLayer(layers.Layer):
def call(self, x):
self.add_loss(ops.sum(x))
return x
layer = LossLayer()
layer(np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 1.0)
# losses are reset upon call
layer = LossLayer()
layer(np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 1.0)
# It works inside a model
model = models.Sequential([layer])
model(np.ones((1,)))
self.assertLen(model.losses, 1)
self.assertAllClose(model.losses[0], 1.0)
# It works recursively in nested models
model = models.Sequential([model])
model(np.ones((1,)))
self.assertLen(model.losses, 1)
self.assertAllClose(model.losses[0], 1.0)
def test_training_arg_value_resolution(self):
# Check that even if `training` is not passed
# to an inner layer, the outer value gets propagated
# in __call__.
class TrainingLayer(layers.Layer):
def __init__(self):
super().__init__()
self.dp = layers.Dropout(0.9)
def call(self, x, training=False):
return self.dp(x)
layer = TrainingLayer()
x = np.ones((4, 4))
y = layer(x)
self.assertEqual(ops.min(y), 1)
y = layer(x, training=True)
self.assertEqual(ops.min(y), 0)
# Check that it still works one level deeper.
class WrappedTrainingLayer(layers.Layer):
def __init__(self):
super().__init__()
self.dp = TrainingLayer()
def call(self, x, training=False):
return self.dp(x)
layer = WrappedTrainingLayer()
x = np.ones((4, 4))
y = layer(x)
self.assertEqual(ops.min(y), 1)
y = layer(x, training=True)
self.assertEqual(ops.min(y), 0)
# Check that if `training` is passed
# to an inner layer in call(), the explicitly
# passed value is what the layer sees.
class TrainingLayerExplicit(layers.Layer):
def __init__(self):
super().__init__()
self.dp = layers.Dropout(0.9)
def call(self, x, training=False):
return self.dp(x, training=True)
layer = TrainingLayerExplicit()
x = np.ones((4, 4))
y = layer(x, training=False)
self.assertEqual(ops.min(y), 0)
# Test that layer interruption does not cause
# the call context to linger
class BadLayer(layers.Layer):
def call(self, x, training=False):
raise RuntimeError("oops!")
x = np.ones((4, 4))
layer = BadLayer()
try:
# training=True will be recorded
# in the call context
layer(x, training=True)
except RuntimeError:
pass
layer = TrainingLayer()
# But this layer call should not see it
y = layer(x)
self.assertEqual(ops.min(y), 1)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Some torch ops not implemented for float16 on CPU.",
)
def test_mixed_precision(self):
x = np.ones((4, 4))
layer = layers.Dense(2, dtype="float16")
y = layer(x)
self.assertEqual(layer.compute_dtype, "float16")
self.assertEqual(layer.variable_dtype, "float16")
self.assertDType(y, "float16")
layer = layers.Dense(2, dtype="mixed_float16")
y = layer(x)
self.assertEqual(layer.compute_dtype, "float16")
self.assertEqual(layer.variable_dtype, "float32")
self.assertDType(y, "float16")
self.assertEqual(layer.kernel.dtype, "float32")
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Some torch ops not implemented for float16 on CPU.",
)
def test_autocast(self):
assertDType = self.assertDType
# A layer with a int dtype (some preprocessing layers do this).
class InnerLayerOne(layers.Layer):
def __init__(self):
super().__init__(dtype="int")
self.v = self.add_weight(
shape=(),
initializer="ones",
trainable=True,
dtype="float32",
)
self._build_at_init()
def call(self, x):
# Should not autocast.
assertDType(self.v, "float32")
return ops.add(ops.cast(x, "float32"), self.v)
# A layer that is explicitly full precision.
class InnerLayerTwo(layers.Layer):
def __init__(self):
super().__init__(dtype="float32")
self.v = self.add_weight(
shape=(),
initializer="ones",
trainable=True,
)
self._build_at_init()
def call(self, x):
# Should not autocast.
assertDType(self.v, "float32")
return ops.add(x, self.v)
# A layer that is explicitly mixed precision but with autocast=False
# weight.
class InnerLayerThree(layers.Layer):
def __init__(self):
super().__init__(dtype="mixed_float16")
self.v = self.add_weight(
shape=(),
initializer="ones",
trainable=True,
autocast=False,
)
self._build_at_init()
def call(self, x):
# Should not autocast `self.v`.
assertDType(self.v, "float32")
return ops.add(x, self.v)
# A layer that is explicitly mixed precision with inner layers.
class MixedPrecisionLayer(layers.Layer):
def __init__(self):
super().__init__(dtype="mixed_float16")
self.v = self.add_weight(
shape=(),
initializer="ones",
trainable=True,
)
self.inner_one = InnerLayerOne()
self.inner_two = InnerLayerTwo()
self.inner_three = InnerLayerThree()
self._build_at_init()
def call(self, x):
# Should autocast.
assertDType(self.v, "float16")
return self.inner_three(
self.inner_two(self.inner_one(ops.add(x, self.v)))
)
layer = MixedPrecisionLayer()
y = layer(np.array(0.0))
self.assertEqual(y, 4.0)
def test_autocast_with_np_array(self):
assertDType = self.assertDType
class CustomLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, x):
# Here are the assertions.
assertDType(x[0], "float32") # Cast to compute_dtype
assertDType(x[1], "int32") # Untouched
x = [np.zeros(1, dtype="float64"), np.zeros(1, dtype="int32")]
CustomLayer()(x)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not supported with numpy"
)
def test_keras_mask_with_autocast(self):
assertAllEqual = self.assertAllEqual
assertDType = self.assertDType
class CustomLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def call(self, x, mask=None):
assert mask is not None
assertDType(x, "float16")
return x
x = ops.zeros((1, 2), dtype="float32")
mask = ops.array([True, False])
backend.set_keras_mask(x, mask)
y = CustomLayer(dtype="float16")(x)
assertAllEqual(
mask,
backend.get_keras_mask(y),
"Masking is not propagated by Autocast",
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not supported with numpy"
)
def test_end_to_end_masking(self):
# Check that masking survives compilation
model = models.Sequential(
[
layers.Embedding(
2, 2, mask_zero=True, embeddings_initializer="ones"
),
]
)
model.compile(loss="mse")
targets = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [1.0, 1.0]]])
loss = model.evaluate(np.array([[1, 0, 0, 1]]), targets, verbose=0)
self.assertAllClose(loss, 0.0)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not supported with numpy"
)
def test_masking(self):
class BasicMaskedLayer(layers.Layer):
def __init__(self):
super().__init__()
self.supports_masking = True
def call(self, x, mask=None):
assert mask is not None
return x
layer = BasicMaskedLayer()
x = backend.numpy.ones((4, 4))
mask = backend.numpy.ones((4,))
backend.set_keras_mask(x, mask)
layer(x)
layer(backend.numpy.ones((4, 4)), mask=backend.numpy.ones((4,)))
class NestedInputMaskedLayer(layers.Layer):
def __init__(self):
super().__init__()
self.supports_masking = True
def call(self, x, mask=None):
assert isinstance(x, list)
assert len(x) == 2
assert isinstance(mask, list)
assert len(mask) == 2
return x
layer = NestedInputMaskedLayer()
x1 = backend.numpy.ones((4, 4))
mask1 = backend.numpy.ones((4,))
backend.set_keras_mask(x1, mask1)
x2 = backend.numpy.ones((4, 4))
mask2 = backend.numpy.ones((4,))
backend.set_keras_mask(x2, mask2)
layer([x1, x2])
layer(
[backend.numpy.ones((4, 4)), backend.numpy.ones((4, 4))],
mask=[backend.numpy.ones((4,)), backend.numpy.ones((4,))],
)
class PositionalInputsMaskedLayer(layers.Layer):
def __init__(self):
super().__init__()
self.supports_masking = True
def call(self, x1, x2, x1_mask=None, x2_mask=None):
assert x1_mask is not None
assert x2_mask is not None
return x1 + x2
layer = PositionalInputsMaskedLayer()
layer(x1, x2)
layer(x1=x1, x2=x2)
class PositionalNestedInputsMaskedLayer(layers.Layer):
def __init__(self):
super().__init__()
self.supports_masking = True
def call(self, x1, x2, x1_mask=None, x2_mask=None):
assert isinstance(x1, tuple)
assert x1_mask is not None
assert x2_mask is not None
assert isinstance(x1_mask, tuple)
return x1[0] + x1[1] + x2
layer = PositionalNestedInputsMaskedLayer()
x1_1 = backend.numpy.ones((4, 4))
mask1 = backend.numpy.ones((4,))
backend.set_keras_mask(x1_1, mask1)
x1_2 = backend.numpy.ones((4, 4))
mask2 = backend.numpy.ones((4,))
backend.set_keras_mask(x1_2, mask2)
x2 = backend.numpy.ones((4, 4))
mask2 = backend.numpy.ones((4,))
backend.set_keras_mask(x2, mask2)
layer((x1_1, x1_2), x2)
layer(x1=(x1_1, x1_2), x2=x2)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/attention_test.py | keras/src/layers/attention/attention_test.py | import numpy as np
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AttentionTest(testing.TestCase):
def test_attention_basics(self):
# No scale, no concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"score_mode": "dot",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
# Scale and concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"use_scale": True,
"score_mode": "concat",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_attention_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
# Dot.
layer = layers.Attention(score_mode="dot")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output,
[[[2.462, 3.462], [1.538, 2.538]]],
atol=1e-3,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
scores,
[[[0.269, 0.731], [0.731, 0.269]]],
atol=1e-3,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
# Concat.
layer = layers.Attention(score_mode="concat")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output,
[[[1.727, 2.727], [2.272, 3.272]]],
atol=1e-3,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
scores,
[[[0.636, 0.363], [0.363, 0.636]]],
atol=1e-3,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
def test_attention_with_mask(self):
layer = layers.Attention()
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
query_mask = np.array([[True, False]])
value_mask = np.array([[True, False]])
output, scores = layer(
[query, value],
mask=[query_mask, value_mask],
return_attention_scores=True,
)
self.assertAllClose(output, [[[1.0, 1.0], [0.0, 0.0]]])
self.assertAllClose(scores, [[[1.0, 0.0], [1.0, 0.0]]])
def test_attention_2D_mask_shape_mismatch(self):
layer = layers.Attention()
batch_size, Tq, Tv, dim = 2, 3, 4, 5
query = np.random.random((batch_size, Tq, dim)).astype(np.float32)
value = np.random.random((batch_size, Tv, dim)).astype(np.float32)
query_mask = np.array([[True, False, True], [True, False, True]])
value_mask = np.array(
[[True, False, True, True], [True, False, True, True]]
)
output, scores = layer(
[query, value],
mask=[query_mask, value_mask],
return_attention_scores=True,
)
self.assertEqual(output.shape, (batch_size, Tq, dim))
self.assertEqual(scores.shape, (batch_size, Tq, Tv))
def test_attention_errors(self):
layer = layers.Attention()
tensor = np.array([[[1.0, 1.0], [1.0, 1.0]]])
with self.assertRaisesRegex(ValueError, "must be called on a list"):
layer(tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor, tensor, tensor])
with self.assertRaisesRegex(ValueError, "layer mask must be a list"):
layer([tensor, tensor], mask=tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor], mask=[tensor])
def test_attention_with_dropout(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
layer_with_dropout = layers.Attention(dropout=0.2)
layer_without_dropout = layers.Attention()
output1, scores1 = layer_with_dropout(
[query, value], return_attention_scores=True, training=True
)
output2, scores2 = layer_without_dropout(
[query, value], return_attention_scores=True, training=True
)
self.assertNotAllClose(output1, output2)
self.assertNotAllClose(scores1, scores2)
def test_attention_invalid_score_mode(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument score_mode. "
"Expected one of {'dot', 'concat'}",
):
layers.Attention(score_mode="invalid_mode")
def test_attention_calculate_scores_with_scale(self):
query = np.random.random((2, 3, 4))
key = np.random.random((2, 4, 4))
layer = layers.Attention(use_scale=True, score_mode="dot")
layer.build(input_shape=[(2, 3, 4), (2, 4, 4)])
expected_scores = np.matmul(query, key.transpose((0, 2, 1)))
expected_scores *= layer.scale.numpy()
actual_scores = layer._calculate_scores(query, key)
self.assertAllClose(
actual_scores, expected_scores, tpu_atol=1e-2, tpu_rtol=1e-2
)
def test_attention_calculate_score_mask_no_causal_no_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
mask = layer._calculate_score_mask(
scores, v_mask=None, use_causal_mask=False
)
self.assertIsNone(
mask,
"Mask should be None when no causal mask and no value mask "
"are used",
)
def test_attention_calculate_score_mask_with_causal_no_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
causal_mask = layer._calculate_score_mask(
scores, v_mask=None, use_causal_mask=True
)
expected_causal_mask = np.tril(
np.ones((1, scores.shape[1], scores.shape[2])), k=0
)
self.assertAllClose(causal_mask, expected_causal_mask, atol=1e-6)
def test_attention_calculate_score_mask_with_causal_and_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
v_mask = np.array([[True, False, True, False]])
combined_mask = layer._calculate_score_mask(
scores, v_mask=v_mask, use_causal_mask=True
)
expected_causal_mask = np.tril(
np.ones((1, scores.shape[1], scores.shape[2])), k=0
)
expected_combined_mask = np.logical_and(
expected_causal_mask, v_mask[:, np.newaxis, :]
)
self.assertAllClose(combined_mask, expected_combined_mask, atol=1e-6)
def test_attention_compute_mask_with_no_mask(self):
layer = layers.Attention()
dummy_inputs = [
np.random.random((2, 3, 4)),
np.random.random((2, 4, 4)),
]
self.assertIsNone(
layer.compute_mask(inputs=dummy_inputs, mask=None),
"compute_mask should return None when mask is None",
)
def test_attention_compute_mask_with_first_element_none(self):
layer = layers.Attention()
dummy_inputs = [
np.random.random((2, 3, 4)),
np.random.random((2, 4, 4)),
]
mask = [None, np.array([True, False, True])]
self.assertIsNone(
layer.compute_mask(inputs=dummy_inputs, mask=mask),
"compute_mask should return None when the first element is None",
)
def test_attention_compute_mask_does_not_return_none_with_valid_mask(self):
layer = layers.Attention()
dummy_inputs = [
np.random.random((2, 3, 4)),
np.random.random((2, 4, 4)),
]
valid_mask = np.array([True, False, True])
mask = [valid_mask, np.array([False, True, False])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
self.assertIsNotNone(
computed_mask,
"compute_mask should not return None with a valid mask",
)
def test_attention_compute_mask_returns_correct_tensor_with_valid_mask(
self,
):
layer = layers.Attention()
dummy_inputs = [
np.random.random((2, 3, 4)),
np.random.random((2, 4, 4)),
]
valid_mask = np.array([True, False, True])
mask = [valid_mask, np.array([False, True, False])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
self.assertTrue(
np.array_equal(computed_mask, valid_mask),
"compute_mask did not return the correct mask tensor",
)
def test_attention_compute_mask_returns_correct_tensor_with_all_true_mask(
self,
):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([True, True, True])
mask = [valid_mask, np.array([True, True, True])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_mask = np.array([True, True, True])
self.assertTrue(
np.array_equal(computed_mask, expected_mask),
"compute_mask did not return the correct mask tensor",
)
def test_attention_compute_mask_returns_correct_tensor_with_all_false_mask(
self,
):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([False, False, False])
mask = [valid_mask, np.array([False, False, False])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_mask = np.array([False, False, False])
self.assertTrue(
np.array_equal(computed_mask, expected_mask),
"compute_mask did not return the correct mask tensor",
)
def test_attention_compute_mask_with_tolerance_1e_3(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_mask = valid_mask
self.assertTrue(
np.allclose(computed_mask, expected_mask, atol=1e-3),
"Incorrect mask tensor within tolerance 1e-3",
)
def test_attention_compute_mask_with_tolerance_1e_5(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_mask = valid_mask
self.assertTrue(
np.allclose(computed_mask, expected_mask, atol=1e-5),
"Incorrect mask tensor within tolerance 1e-5",
)
def test_attention_compute_mask_with_tolerance_1e_7(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_mask = valid_mask
self.assertTrue(
np.allclose(computed_mask, expected_mask, atol=1e-7),
"Incorrect mask tensor within tolerance 1e-7 ",
)
def test_attention_compute_mask_with_single_element_masks(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([True])
mask = [valid_mask, np.array([False])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
expected_shape = (1,)
self.assertEqual(computed_mask.shape, expected_shape)
def test_attention_compute_mask_with_non_boolean_masks(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
valid_mask = np.array([1, 0, 1])
mask = [valid_mask, np.array([0, 1, 0])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
self.assertTrue(np.array_equal(computed_mask, valid_mask))
def test_attention_compute_mask_with_edge_case_masks(self):
layer = layers.Attention()
dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
edge_case_masks = [
np.array([True, True, True]),
np.array([False, False, False]),
np.array([True, False, True]),
]
for mask in edge_case_masks:
computed_mask = layer.compute_mask(
inputs=dummy_inputs, mask=[mask, mask]
)
computed_mask = ops.convert_to_numpy(computed_mask)
self.assertTrue(np.array_equal(computed_mask, mask))
def test_attention_compute_mask_with_different_input_shapes(self):
layer = layers.Attention()
input_shapes = [(2, 3, 4), (3, 2, 5), (4, 1, 6)]
valid_mask = np.array([True, False, True])
for shape in input_shapes:
dummy_inputs = [np.ones(shape), np.ones(shape)]
mask = [valid_mask, np.array([False, True, False])]
computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
computed_mask = ops.convert_to_numpy(computed_mask)
self.assertTrue(np.array_equal(computed_mask, valid_mask))
def test_attention_compute_output_shape(self):
layer = layers.Attention()
query = np.random.random((2, 3, 4))
value = np.random.random((2, 3, 5))
key = np.random.random((2, 3, 4))
layer = layers.Attention()
output = layer([query, value, key])
self.assertAllEqual(output.shape, value.shape)
self.assertAllEqual(
layer.compute_output_shape(
input_shape=[query.shape, value.shape, key.shape]
),
output.shape,
)
def test_return_attention_scores_true(self):
"""Test that the layer returns attention scores along with outputs."""
# Generate dummy input data
query = np.random.random((2, 8, 16)).astype(np.float32)
value = np.random.random((2, 4, 16)).astype(np.float32)
# Initialize the Attention layer
layer = layers.Attention()
# Call the layer with return_attention_scores=True
output, attention_scores = layer(
[query, value], return_attention_scores=True
)
# Check the shape of the outputs
self.assertEqual(output.shape, (2, 8, 16)) # Output shape
self.assertEqual(
attention_scores.shape, (2, 8, 4)
) # Attention scores shape
def test_return_attention_scores_true_and_tuple(self):
"""Test that the layer outputs are a tuple when
return_attention_scores=True."""
# Generate dummy input data
query = np.random.random((2, 8, 16)).astype(np.float32)
value = np.random.random((2, 4, 16)).astype(np.float32)
# Initialize the Attention layer
layer = layers.Attention()
# Call the layer with return_attention_scores=True
outputs = layer([query, value], return_attention_scores=True)
# Check that outputs is a tuple
self.assertIsInstance(
outputs, tuple, "Expected the outputs to be a tuple"
)
def test_return_attention_scores_true_tuple_then_unpack(self):
"""Test that outputs can be unpacked correctly."""
# Generate dummy input data
query = np.random.random((2, 8, 16)).astype(np.float32)
value = np.random.random((2, 4, 16)).astype(np.float32)
# Initialize the Attention layer
layer = layers.Attention()
# Call the layer with return_attention_scores=True
outputs = layer([query, value], return_attention_scores=True)
# Unpack the outputs
output, attention_scores = outputs
# Check the shape of the unpacked outputs
self.assertEqual(output.shape, (2, 8, 16)) # Output shape
self.assertEqual(
attention_scores.shape, (2, 8, 4)
) # Attention scores shape
def test_return_attention_scores_with_symbolic_tensors(self):
"""Test to check outputs with symbolic tensors with
return_attention_scores = True"""
attention = layers.Attention()
x = layers.Input(shape=(3, 5))
y = layers.Input(shape=(4, 5))
output, attention_scores = attention(
[x, y], return_attention_scores=True
)
self.assertEqual(output.shape, (None, 3, 5)) # Output shape
self.assertEqual(attention_scores.shape, (None, 3, 4))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/additive_attention_test.py | keras/src/layers/attention/additive_attention_test.py | import numpy as np
from keras.src import layers
from keras.src import testing
class AdditiveAttentionTest(testing.TestCase):
    """Tests for `keras.layers.AdditiveAttention`."""

    def test_attention_basics(self):
        """Layer contract with and without the learnable scale weight."""
        # With scale: the learnable scale vector is the single trainable
        # weight. (The original comment pair had the two cases swapped
        # relative to the `use_scale` kwargs below.)
        self.run_layer_test(
            layers.AdditiveAttention,
            init_kwargs={
                "use_scale": True,
                "dropout": 0.5,
            },
            input_shape=[(2, 3, 4), (2, 4, 4)],
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            run_training_check=False,
        )
        # Without scale: no trainable weights at all.
        self.run_layer_test(
            layers.AdditiveAttention,
            init_kwargs={
                "use_scale": False,
                "dropout": 0.5,
            },
            input_shape=[(2, 3, 4), (2, 4, 4)],
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            run_training_check=False,
        )

    def test_attention_correctness(self):
        """Known-answer check of outputs and attention scores."""
        query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
        key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
        value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
        layer = layers.AdditiveAttention(use_scale=False)
        output, scores = layer(
            [query, value, key],
            return_attention_scores=True,
        )
        self.assertAllClose(
            output,
            [[[1.727, 2.727], [2.272, 3.272]]],
            atol=1e-3,
            tpu_atol=1e-2,
        )
        self.assertAllClose(
            scores,
            [[[0.636, 0.363], [0.363, 0.636]]],
            atol=1e-3,
            tpu_atol=1e-2,
        )

    def test_attention_with_mask(self):
        """Query/value masks zero out masked positions and scores."""
        layer = layers.AdditiveAttention(use_scale=False)
        query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
        value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
        query_mask = np.array([[True, False]])
        value_mask = np.array([[True, False]])
        output, scores = layer(
            [query, value],
            mask=[query_mask, value_mask],
            return_attention_scores=True,
        )
        self.assertAllClose(output, [[[1.0, 1.0], [0.0, 0.0]]])
        self.assertAllClose(scores, [[[1.0, 0.0], [1.0, 0.0]]])

    def test_attention_errors(self):
        """Invalid input/mask structures raise helpful ValueErrors."""
        layer = layers.AdditiveAttention()
        tensor = np.array([[[1.0, 1.0], [1.0, 1.0]]])
        with self.assertRaisesRegex(ValueError, "must be called on a list"):
            layer(tensor)
        with self.assertRaisesRegex(ValueError, "length 2 or 3"):
            layer([tensor, tensor, tensor, tensor])
        with self.assertRaisesRegex(ValueError, "layer mask must be a list"):
            layer([tensor, tensor], mask=tensor)
        with self.assertRaisesRegex(ValueError, "length 2 or 3"):
            layer([tensor, tensor], mask=[tensor])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/multi_head_attention.py | keras/src/layers/attention/multi_head_attention.py | import math
import string
import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.backend.config import is_flash_attention_enabled
from keras.src.layers.activations.softmax import Softmax
from keras.src.layers.core.einsum_dense import EinsumDense
from keras.src.layers.layer import Layer
from keras.src.layers.regularization.dropout import Dropout
@keras_export("keras.layers.MultiHeadAttention")
class MultiHeadAttention(Layer):
"""MultiHeadAttention layer.
This is an implementation of multi-headed attention as described in the
paper "Attention is all you Need"
[Vaswani et al., 2017](https://arxiv.org/abs/1706.03762).
    If `query`, `key`, `value` are the same, then
this is self-attention. Each timestep in `query` attends to the
corresponding sequence in `key`, and returns a fixed-width vector.
This layer first projects `query`, `key` and `value`. These are
(effectively) a list of tensors of length `num_attention_heads`, where the
corresponding shapes are `(batch_size, <query dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, value_dim)`.
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor.
Finally, the result tensor with the last dimension as `value_dim` can take
a linear projection and return.
Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
dropout: Dropout probability.
use_bias: Boolean, whether the dense layers use bias vectors/matrices.
output_shape: The expected shape of an output tensor, besides the batch
and sequence dims. If not specified, projects back to the query
feature dim (the query input's last dimension).
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
flash_attention: If `None`, the layer attempts to use flash
attention for faster and more memory-efficient attention
computations when possible. This behavior can be configured using
`keras.config.enable_flash_attention()` or
`keras.config.disable_flash_attention()`.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
seed: Optional integer to seed the dropout layer.
Call arguments:
query: Query tensor of shape `(B, T, dim)`, where `B` is the batch size,
`T` is the target sequence length, and dim is the feature dimension.
value: Value tensor of shape `(B, S, dim)`, where `B` is the batch size,
`S` is the source sequence length, and dim is the feature dimension.
key: Optional key tensor of shape `(B, S, dim)`. If not given, will
use `value` for both `key` and `value`, which is the most common
case.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. The boolean mask specifies which
query elements can attend to which key elements, 1 indicates
attention and 0 indicates no attention. Broadcasting can happen for
the missing batch dimensions and the head dimension.
return_attention_scores: A boolean to indicate whether the output should
be `(attention_output, attention_scores)` if `True`, or
`attention_output` if `False`. Defaults to `False`.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Will go with either using the training mode of the parent
layer/model, or `False` (inference) if there is no parent layer.
use_causal_mask: A boolean to indicate whether to apply a causal mask to
prevent tokens from attending to future tokens (e.g., used in a
decoder Transformer).
Returns:
attention_output: The result of the computation, of shape `(B, T, E)`,
where `T` is for target sequence shapes and `E` is the query input
last dimension if `output_shape` is `None`. Otherwise, the
multi-head outputs are projected to the shape specified by
`output_shape`.
attention_scores: (Optional) multi-head attention coefficients over
attention axes.
"""
def __init__(
self,
num_heads,
key_dim,
value_dim=None,
dropout=0.0,
use_bias=True,
output_shape=None,
attention_axes=None,
flash_attention=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self._num_heads = num_heads
self._key_dim = key_dim
self._value_dim = value_dim if value_dim else key_dim
self._dropout = dropout
self._use_bias = use_bias
if output_shape:
if isinstance(output_shape, int):
output_shape = (output_shape,)
try:
output_shape = tuple(output_shape)
except:
raise ValueError(
f"Invalid `output_shape`: {output_shape}. When "
"specified, the `output_shape` should be of type tuple, "
"list, or int."
)
self._output_shape = output_shape
self._flash_attention = flash_attention or is_flash_attention_enabled()
self._kernel_initializer = initializers.get(kernel_initializer)
self._bias_initializer = initializers.get(bias_initializer)
self._kernel_regularizer = regularizers.get(kernel_regularizer)
self._bias_regularizer = regularizers.get(bias_regularizer)
self._activity_regularizer = regularizers.get(activity_regularizer)
self._kernel_constraint = constraints.get(kernel_constraint)
self._bias_constraint = constraints.get(bias_constraint)
if isinstance(attention_axes, int):
attention_axes = (attention_axes,)
elif attention_axes and not isinstance(attention_axes, (list, tuple)):
raise ValueError(
"`attention_axes` must be an int, list, or tuple."
f"Received: attention_axes={attention_axes}"
)
self._attention_axes = attention_axes
self.seed = seed
self._inverse_sqrt_key_dim = 1.0 / math.sqrt(float(self._key_dim))
# Check for flash attention constraints
if self._flash_attention and self._dropout > 0.0:
raise ValueError(
"Dropout is not supported when flash attention is enabled. "
"Please set dropout to 0.0 to use flash attention."
)
    @property
    def num_heads(self):
        """Number of attention heads."""
        return self._num_heads

    @property
    def key_dim(self):
        """Per-head size of the query/key projections."""
        return self._key_dim

    @property
    def value_dim(self):
        """Per-head size of the value projection."""
        return self._value_dim

    @property
    def dropout(self):
        """Dropout probability applied to the attention scores."""
        return self._dropout

    @property
    def use_bias(self):
        """Whether the internal dense layers use bias terms."""
        return self._use_bias

    # Avoid exposing `output_shape` as it may conflict with `Functional` and
    # `Sequential` models when calling `summary()`.

    @property
    def attention_axes(self):
        """Axes attention is applied over (`None` means all axes but batch,
        heads, and features)."""
        return self._attention_axes
    def get_config(self):
        """Returns the layer config for serialization.

        Initializers, regularizers, and constraints are serialized so the
        layer can be reconstructed via `from_config`. `flash_attention` is
        intentionally not serialized (it is resolved from the global config
        at construction time).
        """
        base_config = super().get_config()
        config = {
            "num_heads": self._num_heads,
            "key_dim": self._key_dim,
            "value_dim": self._value_dim,
            "dropout": self._dropout,
            "use_bias": self._use_bias,
            "output_shape": self._output_shape,
            "attention_axes": self._attention_axes,
            "kernel_initializer": initializers.serialize(
                self._kernel_initializer
            ),
            "bias_initializer": initializers.serialize(self._bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self._kernel_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self._bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self._activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self._kernel_constraint),
            "bias_constraint": constraints.serialize(self._bias_constraint),
            "seed": self.seed,
        }
        return {**base_config, **config}
    def build(
        self,
        query_shape,
        value_shape,
        key_shape=None,
    ):
        """Builds layers and variables.

        Args:
            query_shape: Shape of the `query` tensor.
            value_shape: Shape of the `value` tensor.
            key_shape: Optional shape of the `key` tensor; defaults to
                `value_shape` when omitted.
        """
        key_shape = value_shape if key_shape is None else key_shape

        if value_shape[1:-1] != key_shape[1:-1]:
            raise ValueError(
                "All dimensions of `value` and `key`, except the last one, "
                f"must be equal. Received: value_shape={value_shape} and "
                f"key_shape={key_shape}"
            )
        query_rank = len(query_shape)
        value_rank = len(value_shape)
        key_rank = len(key_shape)
        # Projection for `query`: last dim -> (num_heads, key_dim).
        einsum_equation, bias_axes, output_rank = _build_proj_equation(
            query_rank - 1, bound_dims=1, output_dims=2
        )
        self._query_dense = EinsumDense(
            einsum_equation,
            output_shape=_get_output_shape(
                output_rank - 1, [self._num_heads, self._key_dim]
            ),
            bias_axes=bias_axes if self._use_bias else None,
            name="query",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._query_dense.build(query_shape)
        # Projection for `key`: last dim -> (num_heads, key_dim).
        einsum_equation, bias_axes, output_rank = _build_proj_equation(
            key_rank - 1, bound_dims=1, output_dims=2
        )
        self._key_dense = EinsumDense(
            einsum_equation,
            output_shape=_get_output_shape(
                output_rank - 1, [self._num_heads, self._key_dim]
            ),
            bias_axes=bias_axes if self._use_bias else None,
            name="key",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._key_dense.build(key_shape)
        # Projection for `value`: last dim -> (num_heads, value_dim).
        einsum_equation, bias_axes, output_rank = _build_proj_equation(
            value_rank - 1, bound_dims=1, output_dims=2
        )
        self._value_dense = EinsumDense(
            einsum_equation,
            output_shape=_get_output_shape(
                output_rank - 1, [self._num_heads, self._value_dim]
            ),
            bias_axes=bias_axes if self._use_bias else None,
            name="value",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._value_dense.build(value_shape)
        # Builds the attention computations for multi-head dot product
        # attention. These computations could be wrapped into the keras
        # attention layer once it supports multi-head einsum computations.
        self._build_attention(output_rank)
        self._output_dense = self._make_output_dense(
            query_shape,
            self._get_common_kwargs_for_sublayer(),
            "attention_output",
        )
        # The output projection consumes the per-head value outputs, so its
        # input shape is the query projection's shape with the last axis
        # replaced by `value_dim`.
        output_dense_input_shape = list(
            self._query_dense.compute_output_shape(query_shape)
        )
        output_dense_input_shape[-1] = self._value_dim
        self._output_dense.build(tuple(output_dense_input_shape))
    @property
    def query_dense(self):
        """Internal `EinsumDense` projection for `query`."""
        return self._query_dense

    @property
    def key_dense(self):
        """Internal `EinsumDense` projection for `key`."""
        return self._key_dense

    @property
    def value_dense(self):
        """Internal `EinsumDense` projection for `value`."""
        return self._value_dense

    @property
    def output_dense(self):
        """Internal `EinsumDense` output projection."""
        return self._output_dense
def _get_common_kwargs_for_sublayer(self):
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
dtype=self.dtype_policy,
)
# Create new clone of kernel/bias initializer, so that we don't reuse
# the initializer instance, which could lead to same init value since
# initializer is stateless.
kernel_initializer = self._kernel_initializer.__class__.from_config(
self._kernel_initializer.get_config()
)
bias_initializer = self._bias_initializer.__class__.from_config(
self._bias_initializer.get_config()
)
common_kwargs["kernel_initializer"] = kernel_initializer
common_kwargs["bias_initializer"] = bias_initializer
return common_kwargs
    def _make_output_dense(self, query_shape, common_kwargs, name=None):
        """Builds the output projection matrix.

        Args:
            query_shape: Shape of the `query` tensor, used to size the
                einsum equation. (The docstring previously documented a
                non-existent `free_dims` parameter.)
            common_kwargs: Common keyword arguments for einsum layer.
            name: Name for the projection layer.

        Returns:
            Projection layer.
        """
        query_rank = len(query_shape)
        # Without an explicit `output_shape`, project back to the query's
        # feature dimension.
        if self._output_shape:
            output_shape = self._output_shape
        else:
            output_shape = [query_shape[-1]]
        einsum_equation, bias_axes, output_rank = _build_proj_equation(
            query_rank - 1, bound_dims=2, output_dims=len(output_shape)
        )
        return EinsumDense(
            einsum_equation,
            output_shape=_get_output_shape(output_rank - 1, output_shape),
            bias_axes=bias_axes if self._use_bias else None,
            name=name,
            **common_kwargs,
        )
    def _build_attention(self, rank):
        """Builds multi-head dot-product attention computations.

        This function builds attributes necessary for `_compute_attention` to
        customize attention computation to replace the default dot-product
        attention.

        Args:
            rank: the rank of query, key, value tensors.
        """
        if self._attention_axes is None:
            # Default: attend over every axis except batch (0) and the last
            # two (heads, features).
            self._attention_axes = tuple(range(1, rank - 2))
        else:
            # Normalize negative axes; note the offset is `rank - 1`, not
            # `rank` (the feature axis is excluded from the addressable
            # range).
            self._attention_axes = tuple(
                axis if axis >= 0 else (rank - 1) + axis
                for axis in self._attention_axes
            )
        (
            self._dot_product_equation,
            self._combine_equation,
            attn_scores_rank,
        ) = _build_attention_equation(rank, attn_axes=self._attention_axes)
        # Softmax normalizes over the trailing key-attention axes of the
        # score tensor.
        norm_axes = tuple(
            range(
                attn_scores_rank - len(self._attention_axes), attn_scores_rank
            )
        )
        self._softmax = Softmax(axis=norm_axes, dtype=self.dtype_policy)
        self._dropout_layer = Dropout(
            rate=self._dropout, dtype=self.dtype_policy, seed=self.seed
        )
    def _masked_softmax(self, attention_scores, attention_mask=None):
        """Applies softmax over the attention axes, honoring the mask.

        Args:
            attention_scores: Raw attention score tensor, e.g. `[B, N, T, S]`.
            attention_mask: Optional boolean mask broadcastable to the score
                shape; expanded here to align with the score tensor's rank.
        """
        # Normalize the attention scores to probabilities.
        # attention_scores = [B, N, T, S]
        if attention_mask is not None:
            # The expand dim happens starting from the `num_heads` dimension,
            # (<batch_dims>, num_heads, <query_attention_dims,
            # key_attention_dims>)
            mask_expansion_axis = -len(self._attention_axes) * 2 - 1
            for _ in range(
                len(attention_scores.shape) - len(attention_mask.shape)
            ):
                attention_mask = ops.expand_dims(
                    attention_mask, axis=mask_expansion_axis
                )
        return self._softmax(attention_scores, mask=attention_mask)
    def _compute_attention(
        self,
        query,
        key,
        value,
        attention_mask=None,
        training=None,
        return_attention_scores=False,
    ):
        """Applies Dot-product attention with query, key, value tensors.

        This function defines the computation inside `call` with projected
        multi-head Q, K, V inputs. Users can override this function for
        customized attention implementation.

        Args:
            query: Projected query tensor of shape `(B, T, N, key_dim)`.
            key: Projected key tensor of shape `(B, S, N, key_dim)`.
            value: Projected value tensor of shape `(B, S, N, value_dim)`.
            attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
                attention to certain positions. It is generally not needed if
                the `query` and `value` (and/or `key`) are masked.
            training: Python boolean indicating whether the layer should behave
                in training mode (adding dropout) or in inference mode (doing
                nothing).
            return_attention_scores: if `True`, the fused dot-product path
                cannot be used and explicit attention scores are computed
                and returned.

        Returns:
            attention_output: Multi-headed outputs of attention computation.
            attention_scores: Multi-headed attention weights, or `None` when
                the fused dot-product path was taken.
        """
        # Check for flash attention constraints
        if self._flash_attention and return_attention_scores:
            raise ValueError(
                "Returning attention scores is not supported when flash "
                "attention is enabled. Please disable flash attention to access"
                " attention scores."
            )

        # Determine whether to use dot-product attention
        # (requires no dropout, no score return, and 4D inputs).
        use_dot_product_attention = not (
            self._dropout > 0.0
            or return_attention_scores
            or (len(query.shape) != 4)
        )

        if use_dot_product_attention:
            if attention_mask is not None:
                # Ensure attention_mask has the correct shape for broadcasting
                # Expected shape: [batch_size, num_heads, query_seq_len,
                # key_seq_len].
                mask_expansion_axis = -len(self._attention_axes) * 2 - 1
                len_attention_scores_shape = 4  # Only accepts 4D inputs
                for _ in range(
                    len_attention_scores_shape - len(attention_mask.shape)
                ):
                    attention_mask = ops.expand_dims(
                        attention_mask, axis=mask_expansion_axis
                    )
                attention_mask = ops.cast(attention_mask, dtype="bool")
            # Directly compute the attention output using dot-product attention
            attention_output = ops.dot_product_attention(
                query=query,
                key=key,
                value=value,
                bias=None,
                mask=attention_mask,
                scale=self._inverse_sqrt_key_dim,
                is_causal=False,
                flash_attention=self._flash_attention,
            )
            return attention_output, None

        # Default behavior without flash attention, with explicit attention
        # scores
        query = ops.multiply(
            query, ops.cast(self._inverse_sqrt_key_dim, query.dtype)
        )
        # Take the dot product between "query" and "key" to get the raw
        # attention scores.
        attention_scores = ops.einsum(self._dot_product_equation, key, query)
        # Apply the mask using the custom masked softmax
        attention_scores = self._masked_softmax(
            attention_scores, attention_mask
        )
        # Apply dropout to the attention scores if needed
        if self._dropout > 0.0:
            final_attn_scores = self._dropout_layer(
                attention_scores, training=training
            )
        else:
            final_attn_scores = attention_scores

        # `context_layer` = [B, T, N, H]
        attention_output = ops.einsum(
            self._combine_equation, final_attn_scores, value
        )
        return attention_output, attention_scores
    def call(
        self,
        query,
        value,
        key=None,
        query_mask=None,
        value_mask=None,
        key_mask=None,
        attention_mask=None,
        return_attention_scores=False,
        training=None,
        use_causal_mask=False,
    ):
        """Forward pass. See the class docstring for argument details."""
        if key is None:
            key = value

        # Delete the masks because the masks are handled at the level of the
        # layer
        # NOTE(review): the `query_mask` argument is unconditionally
        # overwritten here with the tensor's attached Keras mask, so a
        # `query_mask` passed explicitly by the caller is ignored — confirm
        # this is intended (the `value_mask`/`key_mask` arguments, by
        # contrast, are used as passed).
        query_mask = backend.get_keras_mask(query)
        backend.set_keras_mask(query, None)
        backend.set_keras_mask(value, None)
        backend.set_keras_mask(key, None)

        # Merge per-input masks, the causal mask, and any explicit
        # attention_mask into one (B, T, S) boolean mask.
        attention_mask = self._compute_attention_mask(
            query,
            value,
            query_mask=query_mask,
            value_mask=value_mask,
            key_mask=key_mask,
            attention_mask=attention_mask,
            use_causal_mask=use_causal_mask,
        )

        #   N = `num_attention_heads`
        #   H = `size_per_head`
        # `query` = [B, T, N, H]
        query = self._query_dense(query)
        # `key` = [B, S, N, H]
        key = self._key_dense(key)
        # `value` = [B, S, N, H]
        value = self._value_dense(value)
        attention_output, attention_scores = self._compute_attention(
            query,
            key,
            value,
            attention_mask,
            training,
            return_attention_scores,
        )
        attention_output = self._output_dense(attention_output)

        # Set mask on output if needed
        if query_mask is not None:
            backend.set_keras_mask(attention_output, query_mask)
        if return_attention_scores:
            return attention_output, attention_scores
        return attention_output
    def _compute_attention_mask(
        self,
        query,
        value,
        query_mask=None,
        value_mask=None,
        key_mask=None,
        attention_mask=None,
        use_causal_mask=False,
    ):
        """Computes the attention mask, using the Keras masks of the inputs.

        * The `query`'s mask is reshaped from [B, T] to [B, T, 1].
        * The `value`'s mask is reshaped from [B, S] to [B, 1, S].
        * The `key`'s mask is reshaped from [B, S] to [B, 1, S]. The `key`'s
          mask is ignored if `key` is `None` or if `key is value`.
        * If `use_causal_mask=True`, then the causal mask is computed. Its shape
          is [1, T, S].

        All defined masks are merged using a logical AND operation (`&`).

        In general, if the `query` and `value` are masked, then there is no need
        to define the `attention_mask`.

        Args:
            query: Query tensor of shape `(B, T, dim)`.
            value: Value tensor of shape `(B, S, dim)`.
            query_mask: Optional boolean mask of shape `(B, T)`.
            value_mask: Optional boolean mask of shape `(B, S)`.
            key_mask: Optional boolean mask of shape `(B, S)`.
            attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
                attention to certain positions.
            use_causal_mask: A boolean to indicate whether to apply a causal
                mask to prevent tokens from attending to future tokens (e.g.,
                used in a decoder Transformer).

        Returns:
            attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
                attention to certain positions, based on the Keras masks of the
                `query`, `key`, `value`, and `attention_mask` tensors, and the
                causal mask if `use_causal_mask=True`.
        """
        auto_mask = None
        if query_mask is not None:
            query_mask = ops.cast(query_mask, "bool")  # defensive casting
            # B = batch size, T = max query length
            auto_mask = ops.expand_dims(query_mask, -1)  # shape is [B, T, 1]
        if value_mask is not None:
            value_mask = ops.cast(value_mask, "bool")  # defensive casting
            # B = batch size, S == max value length
            mask = ops.expand_dims(value_mask, -2)  # shape is [B, 1, S]
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if key_mask is not None:
            key_mask = ops.cast(key_mask, "bool")  # defensive casting
            # B == batch size, S == max key length == max value length
            mask = ops.expand_dims(key_mask, -2)  # shape is [B, 1, S]
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if use_causal_mask:
            # the shape of the causal mask is [1, T, S]
            mask = self._compute_causal_mask(query, value)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if attention_mask is not None:
            attention_mask = ops.cast(attention_mask, "bool")
        if auto_mask is not None:
            # merge attention_mask & automatic mask, to shape [B, T, S]
            attention_mask = (
                auto_mask
                if attention_mask is None
                else attention_mask & auto_mask
            )
        return attention_mask
def _compute_causal_mask(self, query, value=None):
"""Computes a causal mask (e.g., for masked self-attention layers).
For example, if query and value both contain sequences of length 4,
this function returns a boolean tensor equal to:
```
[[[True, False, False, False],
[True, True, False, False],
[True, True, True, False],
[True, True, True, True]]]
```
Args:
query: query tensor of shape `(B, T, ...)`.
value: value tensor of shape `(B, S, ...)` (optional, defaults to
query).
Returns:
mask: a boolean tensor of shape `(1, T, S)` containing a lower
triangular matrix of shape `(T, S)`.
"""
q_seq_length = ops.shape(query)[1]
v_seq_length = q_seq_length if value is None else ops.shape(value)[1]
ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype="int32")
row_index = ops.cumsum(ones_mask, axis=-2)
col_index = ops.cumsum(ones_mask, axis=-1)
return ops.greater_equal(row_index, col_index)
def compute_output_shape(
self,
query_shape,
value_shape,
key_shape=None,
):
query_shape = tuple(query_shape)
value_shape = tuple(value_shape)
if key_shape is None:
key_shape = value_shape
else:
key_shape = tuple(key_shape)
if value_shape[1:-1] != key_shape[1:-1]:
raise ValueError(
"All dimensions of `value` and `key`, except the last one, "
f"must be equal. Received: value_shape={value_shape} and "
f"key_shape={key_shape}"
)
if self._output_shape:
query_shape = query_shape[:-1] + self._output_shape
return query_shape
def compute_output_spec(
self,
query,
value,
key=None,
query_mask=None,
value_mask=None,
key_mask=None,
attention_mask=None,
return_attention_scores=False,
training=None,
use_causal_mask=False,
):
if key is not None:
key_shape = key.shape
else:
key_shape = None
output_shape = self.compute_output_shape(
query.shape, value.shape, key_shape
)
output_spec = backend.KerasTensor(
output_shape, dtype=self.compute_dtype
)
if return_attention_scores:
length = query.shape[1]
attention_shape = (query.shape[0], self.num_heads, length, length)
return output_spec, backend.KerasTensor(
attention_shape, dtype=self.compute_dtype
)
return output_spec
def _index_to_einsum_variable(i):
    """Return the einsum axis label for index *i*.

    Index 0 maps to `'a'`, 1 to `'b'`, and so on through the lowercase
    alphabet; indexing past `'z'` raises `IndexError`.
    """
    alphabet = string.ascii_lowercase
    return alphabet[i]
def _build_attention_equation(rank, attn_axes):
    """Build the einsum equations used by the attention computation.

    Projected query/key/value tensors are expected to have shape
    `(bs, <non-attention dims>, <attention dims>, num_heads, channels)`,
    where `bs` and the non-attention dims act as batch dims. Two equations
    are produced:

    1. Query-key dot product:
       (<batch dims>, <query attention dims>, num_heads, channels),
       (<batch dims>, <key attention dims>, num_heads, channels) ->
       (<batch dims>, num_heads, <query attention dims>, <key attention dims>)
    2. Score-value combination:
       (<batch dims>, num_heads, <query attention dims>, <key attention dims>),
       (<batch dims>, <value attention dims>, num_heads, channels) ->
       (<batch dims>, <query attention dims>, num_heads, channels)

    Args:
        rank: rank of the query/key/value tensors.
        attn_axes: list/tuple of axes in `[-1, rank)` that attention is
            applied over.

    Returns:
        Tuple of `(dot_product_equation, combine_equation, attn_scores_rank)`.
    """
    out_spec = "".join(_index_to_einsum_variable(i) for i in range(rank))
    # `batch_dims` includes the head dim.
    batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
    # The source (key/value) side shares batch and channel labels with the
    # target, but gets fresh labels for its own attention axes.
    next_letter = rank
    src_chars = []
    for i in range(rank):
        if i in batch_dims or i == rank - 1:
            src_chars.append(out_spec[i])
        else:
            src_chars.append(_index_to_einsum_variable(next_letter))
            next_letter += 1
    src_spec = "".join(src_chars)
    score_spec = (
        "".join(out_spec[i] for i in batch_dims)
        + "".join(out_spec[i] for i in attn_axes)
        + "".join(src_spec[i] for i in attn_axes)
    )
    dot_product_equation = f"{src_spec},{out_spec}->{score_spec}"
    combine_equation = f"{score_spec},{src_spec}->{out_spec}"
    return dot_product_equation, combine_equation, len(score_spec)
def _build_proj_equation(free_dims, bound_dims, output_dims):
    """Build an einsum equation for a projection inside multi-head attention.

    Free dims are carried from input to output, bound dims are contracted
    between input and kernel, and output dims are introduced by the kernel
    (these also name the bias axes).

    Args:
        free_dims: number of leading input dims kept in the output.
        bound_dims: number of input dims contracted with the kernel.
        output_dims: number of dims produced by the kernel.

    Returns:
        Tuple of `(equation, bias_axes, output_rank)`.
    """
    input_chars = []
    kernel_chars = []
    output_chars = []
    bias_chars = []
    offset = 0
    for i in range(free_dims):
        label = _index_to_einsum_variable(i + offset)
        input_chars.append(label)
        output_chars.append(label)
    offset += free_dims
    for i in range(bound_dims):
        label = _index_to_einsum_variable(i + offset)
        input_chars.append(label)
        kernel_chars.append(label)
    offset += bound_dims
    for i in range(output_dims):
        label = _index_to_einsum_variable(i + offset)
        kernel_chars.append(label)
        output_chars.append(label)
        bias_chars.append(label)
    output_str = "".join(output_chars)
    equation = f"{''.join(input_chars)},{''.join(kernel_chars)}->{output_str}"
    return equation, "".join(bias_chars), len(output_str)
def _get_output_shape(output_rank, known_last_dims):
    """Left-pad `known_last_dims` with `None` up to a total of `output_rank`."""
    num_unknown = output_rank - len(known_last_dims)
    return [None] * num_unknown + list(known_last_dims)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/grouped_query_attention_test.py | keras/src/layers/attention/grouped_query_attention_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import testing
from keras.src.backend.config import disable_flash_attention
from keras.src.backend.config import enable_flash_attention
from keras.src.backend.config import is_flash_attention_enabled
class GroupedQueryAttentionTest(testing.TestCase):
    """Unit tests for `keras.layers.GroupedQueryAttention`."""
    def setUp(self):
        super().setUp()
        # Flash attention is a newly introduced feature. We need to disable it
        # for testing purposes.
        disable_flash_attention()
    def tearDown(self):
        # Restore the global default so other test modules are unaffected.
        enable_flash_attention()
        return super().tearDown()
    def test_basics(self):
        """Smoke-tests construction, weight counts, and masking support."""
        self.assertFalse(is_flash_attention_enabled())
        # With biases: q/k/v/output projections each have kernel + bias = 8.
        self.run_layer_test(
            layers.GroupedQueryAttention,
            init_kwargs={
                "num_query_heads": 2,
                "num_key_value_heads": 2,
                "head_dim": 2,
            },
            input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
            expected_output_shape=(2, 8, 16),
            expected_num_trainable_weights=8,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
            run_training_check=False,
        )
        # Without biases only 4 kernels remain; dropout adds a seed generator.
        self.run_layer_test(
            layers.GroupedQueryAttention,
            init_kwargs={
                "num_query_heads": 2,
                "num_key_value_heads": 2,
                "head_dim": 2,
                "use_bias": False,
                "dropout": 0.5,
            },
            input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
            expected_output_shape=(2, 8, 16),
            expected_num_trainable_weights=4,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            run_training_check=False,
        )
    def test_basics_with_flash_attention(self):
        """Runs the layer with flash attention enabled on capable backends."""
        enable_flash_attention()
        init_kwargs = {
            "num_query_heads": 2,
            "num_key_value_heads": 2,
            "head_dim": 8,
            "dtype": "float16",
        }
        input_shape = {
            "query_shape": (2, 8, 16),
            "value_shape": (2, 4, 16),
        }
        expected_output_shape = (2, 8, 16)
        if backend.backend() in ("tensorflow", "numpy"):
            self.skipTest(
                "Flash attention is not supported in tensorflow and numpy "
                "backends."
            )
        elif backend.backend() == "torch":
            # NOTE(review): if a raised ImportError/RuntimeError does not
            # match the checked message prefixes, the exception is silently
            # swallowed here and the test passes — confirm this is intended.
            try:
                self.run_layer_test(
                    layers.GroupedQueryAttention,
                    init_kwargs=init_kwargs,
                    input_shape=input_shape,
                    expected_output_shape=expected_output_shape,
                    expected_num_trainable_weights=8,
                    expected_num_non_trainable_weights=0,
                    expected_num_seed_generators=0,
                    expected_num_losses=0,
                    supports_masking=True,
                    run_training_check=False,
                )
            except ImportError as e:
                if "Flash attention is not supported" in str(e.args[0]):
                    self.assertTrue(
                        (
                            "Flash attention is not supported in your current "
                            "PyTorch version."
                        )
                        in str(e.args[0])
                    )
            except RuntimeError as e:
                if (
                    "Flash attention is not supported with the provided inputs"
                    in str(e.args[0])
                ):
                    self.assertTrue(
                        (
                            "Flash attention is not supported with the "
                            "provided inputs"
                        )
                        in str(e.args[0])
                    )
        elif backend.backend() == "jax":
            # Same caveat as the torch branch: unmatched exceptions pass.
            try:
                self.run_layer_test(
                    layers.GroupedQueryAttention,
                    init_kwargs=init_kwargs,
                    input_shape=input_shape,
                    expected_output_shape=expected_output_shape,
                    expected_num_trainable_weights=8,
                    expected_num_non_trainable_weights=0,
                    expected_num_seed_generators=0,
                    expected_num_losses=0,
                    supports_masking=True,
                    run_training_check=False,
                )
            except ImportError as e:
                if "Flash attention is not supported" in str(e.args[0]):
                    self.assertTrue(
                        (
                            "Flash attention is not supported in your current "
                            "JAX version."
                        )
                        in str(e.args[0])
                    )
            except RuntimeError as e:
                if "cuDNN" in str(e.args[0]):
                    self.assertTrue("cuDNN is not detected." in str(e.args[0]))
                elif "Require at least" in str(e.args[0]):
                    self.assertTrue(
                        "Require at least Ampere arch to run" in str(e.args[0])
                    )
                elif "Flash attention" in str(e.args[0]):
                    self.assertTrue(
                        (
                            "Flash attention is not supported in your current "
                            "JAX version."
                        )
                        in str(e.args[0])
                    )
    # Cases cover MHA (heads == kv heads), GQA (heads > kv heads > 1), and
    # MQA (kv heads == 1), each with and without an explicit key.
    @parameterized.named_parameters(
        ("without_key_proj_mha", (4, 8), (2, 8), None, 2, 2),
        ("with_key_proj_mha", (4, 8), (2, 8), (2, 3), 2, 2),
        ("without_key_proj_gqa", (4, 8), (2, 8), None, 4, 2),
        ("with_key_proj_gqa", (4, 8), (2, 8), (2, 3), 4, 2),
        ("without_key_value_proj_mqa", (4, 8), (2, 8), None, 4, 1),
        ("with_key_value_proj_mqa", (4, 8), (2, 8), (2, 3), 4, 1),
    )
    def test_compute_output_shape(
        self,
        query_dims,
        value_dims,
        key_dims,
        num_query_heads,
        num_key_value_heads,
    ):
        """Test computed shape is equal to the layer output's shape."""
        layer = layers.GroupedQueryAttention(
            num_query_heads=num_query_heads,
            num_key_value_heads=num_key_value_heads,
            head_dim=2,
        )
        batch_size = 7
        query_shape = (batch_size,) + query_dims
        value_shape = (batch_size,) + value_dims
        key_shape = (batch_size,) + key_dims if key_dims else None
        query = np.ones(query_shape)
        value = np.ones(value_shape)
        key = np.ones(key_shape) if key_shape else None
        output = layer(query=query, value=value, key=key)
        comp_output_shape = layer.compute_output_shape(
            query_shape, value_shape, key_shape
        )
        self.assertEqual(output.shape, comp_output_shape)
    @parameterized.named_parameters(
        ("query_value_dim_mismatch", (2, 4, 8), (2, 2, 7), 2),
        ("key_value_dim_mismatch", (2, 4, 8), (2, 2, 8), (2, 1, 7)),
    )
    def test_shape_mismatch_error(self, query_shape, value_shape, key_shape):
        """Test dimension mismatches"""
        layer = layers.GroupedQueryAttention(
            num_query_heads=4,
            num_key_value_heads=4,
            head_dim=2,
        )
        with self.assertRaisesRegex(ValueError, r"must be equal"):
            layer.compute_output_shape(query_shape, value_shape, key_shape)
    def test_initializer(self):
        """Sub-layers must not share initializer state (same init values)."""
        # Test with a specified initializer.
        layer = layers.GroupedQueryAttention(
            num_query_heads=16,
            num_key_value_heads=16,
            head_dim=64,
            kernel_initializer=initializers.TruncatedNormal(stddev=0.02),
        )
        layer.build((2, 4, 8), (2, 4, 8))
        # Make sure the sub layers have different kernel init value.
        self.assertNotAllClose(
            layer._query_dense.kernel,
            layer._key_dense.kernel,
        )
        self.assertNotAllClose(
            layer._query_dense.kernel,
            layer._value_dense.kernel,
        )
        self.assertNotAllClose(
            layer._query_dense.kernel,
            layer._output_dense.kernel,
        )
    @pytest.mark.skipif(
        backend.backend() == "numpy",
        reason="Numpy backend does not support masking.",
    )
    def test_query_mask_propagation(self):
        """Test automatic propagation of the query's mask."""
        try:
            layer = layers.GroupedQueryAttention(
                num_query_heads=2, num_key_value_heads=2, head_dim=2
            )
            self.assertTrue(layer.supports_masking)
            query = np.array(
                [[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]]
            )
            masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
            value = np.random.normal(size=(3, 3, 8))
            output = layer(query=masked_query, value=value)
        except RuntimeError as e:
            if e.args[0].startswith(
                "(*bias): last dimension must be contiguous"
            ):
                self.skipTest(
                    "PyTorch errors out on GPU: issue to track bug is here "
                    "https://github.com/keras-team/keras/issues/20459"
                )
        self.assertAllClose(masked_query._keras_mask, output._keras_mask)
    @parameterized.named_parameters(("causal", True), ("not_causal", 0))
    @pytest.mark.skipif(
        backend.backend() == "numpy",
        reason="Numpy backend does not support masking.",
    )
    def test_masking(self, use_causal_mask):
        """Test that the value and causal masks are taken into account."""
        layer = layers.GroupedQueryAttention(
            num_query_heads=2, num_key_value_heads=2, head_dim=2
        )
        query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
        masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
        value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]])
        masked_value = layers.Embedding(6, 8, mask_zero=True)(value)
        output = layer(
            query=masked_query,
            value=masked_value,
            use_causal_mask=use_causal_mask,
        )
        # Expected combined mask: outer product of query/value padding masks,
        # optionally AND-ed with the lower-triangular causal mask.
        mask = np.array(
            [[[1, 1, 0]] * 3 + [[0, 0, 0]] * 2]
            + [[[1, 0, 0]] * 5]
            + [[[1, 1, 1]] + [[0, 0, 0]] * 4]
        ).astype(bool)
        if use_causal_mask:
            mask = mask & np.array(
                [[[1, 0, 0], [1, 1, 0]] + [[1, 1, 1]] * 3]
            ).astype(bool)
        # Delete the Keras masks so the second call relies solely on the
        # manually supplied attention_mask.
        del masked_query._keras_mask
        del masked_value._keras_mask
        output_with_manual_mask = layer(
            query=masked_query, value=masked_value, attention_mask=mask
        )
        self.assertAllClose(output, output_with_manual_mask)
    @parameterized.named_parameters(
        ("disable_flash_attention", False), ("enable_flash_attention", True)
    )
    def test_correctness(self, flash_attention):
        """Checks outputs (and scores) against hand-computed values."""
        if flash_attention:
            # Let the backend decide whether to use flash attention
            enable_flash_attention()
        dtype = "float16"  # Flash attention only accepts float16/bfloat16
        head_dim = 8  # key_dim % 8 == 0 to enable flash attention
        num_query_heads = num_key_value_heads = 8
        query = np.identity(head_dim)[np.newaxis, ...]
        key = np.identity(head_dim)[np.newaxis, ...]
        value = (
            np.reshape(np.arange(head_dim * head_dim), (1, head_dim, head_dim))
            / 100.0  # Prevent overflow/underflow
        )
        # Setup layer.
        layer = layers.GroupedQueryAttention(
            head_dim=head_dim,
            num_query_heads=num_query_heads,
            num_key_value_heads=num_key_value_heads,
            dtype=dtype,
        )
        # NOTE(review): `build` takes (query_shape, value_shape, key_shape),
        # so key.shape is passed in the value slot here — harmless because
        # all three shapes are identical, but confirm the ordering.
        layer.build(query.shape, key.shape, value.shape)
        # Set layer weights.
        kernel = np.identity(head_dim)
        # To get an identity kernel we need to add a head dim and repeat on it.
        kernel = np.repeat(kernel[:, np.newaxis, :], num_query_heads, axis=1)
        # Zeros for all biases.
        bias = np.zeros((num_query_heads, head_dim))
        output_bias = np.zeros((head_dim,))
        layer.set_weights([kernel, bias] * 3 + [kernel, output_bias])
        # Call layer and assert output.
        expected_output = np.array(
            [2.406, 2.440, 2.473, 2.504, 2.535, 2.568, 2.602, 2.633]
        )
        expected_output = np.tile(
            expected_output[np.newaxis, :, np.newaxis], (1, 1, head_dim)
        )
        expected_score = np.array(
            [
                [0.1187] * 0 + [0.1691] + [0.1187] * 7,
                [0.1187] * 1 + [0.1691] + [0.1187] * 6,
                [0.1187] * 2 + [0.1691] + [0.1187] * 5,
                [0.1187] * 3 + [0.1691] + [0.1187] * 4,
                [0.1187] * 4 + [0.1691] + [0.1187] * 3,
                [0.1187] * 5 + [0.1691] + [0.1187] * 2,
                [0.1187] * 6 + [0.1691] + [0.1187] * 1,
                [0.1187] * 7 + [0.1691] + [0.1187] * 0,
            ]
        )
        expected_score = np.tile(
            expected_score[np.newaxis, np.newaxis, ...], (1, head_dim, 1, 1)
        )
        if flash_attention:
            # Scores are unavailable when flash attention is active.
            output = layer(query=query, value=value, key=key)
            self.assertAllClose(output, expected_output, atol=1e-2)
        else:
            output, scores = layer(
                query=query,
                value=value,
                key=key,
                return_attention_scores=True,
            )
            self.assertAllClose(output, expected_output, atol=1e-2)
            self.assertAllClose(scores, expected_score, atol=1e-2)
    def test_flash_attention_with_errors(self):
        """Flash attention must reject dropout and score retrieval."""
        if backend.backend() in ("numpy", "tensorflow"):
            pytest.skip(
                reason=(
                    "Flash attention is not supported on tensorflow and numpy."
                )
            )
        # Check `flash_attention=True` and `dropout=0.1`
        with self.assertRaisesRegex(
            ValueError,
            "Dropout is not supported when flash attention is enabled.",
        ):
            layer = layers.GroupedQueryAttention(
                head_dim=2,
                num_query_heads=2,
                num_key_value_heads=2,
                flash_attention=True,
                dropout=0.1,
            )
        # Check `flash_attention=True` and `return_attention_scores=True`
        layer = layers.GroupedQueryAttention(
            head_dim=2,
            num_query_heads=2,
            num_key_value_heads=2,
            flash_attention=True,
        )
        self.assertTrue(layer._flash_attention)
        query = np.random.random((2, 4, 8))
        value = np.random.random((2, 4, 8))
        with self.assertRaisesRegex(
            ValueError,
            "Returning attention scores is not supported when flash "
            "attention is enabled. Please disable flash attention to access"
            " attention scores.",
        ):
            layer(query=query, value=value, return_attention_scores=True)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/grouped_query_attention.py | keras/src/layers/attention/grouped_query_attention.py | import math
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.backend.config import is_flash_attention_enabled
from keras.src.layers.activations.softmax import Softmax
from keras.src.layers.core.einsum_dense import EinsumDense
from keras.src.layers.layer import Layer
from keras.src.layers.regularization.dropout import Dropout
# NOTE(review): the exported public name is `GroupQueryAttention` (no "ed")
# while the class is `GroupedQueryAttention` — confirm the alias spelling is
# intentional before changing it; it is part of the public API.
@keras_export("keras.layers.GroupQueryAttention")
class GroupedQueryAttention(Layer):
    """Grouped Query Attention layer.
    This is an implementation of grouped-query attention introduced by
    [Ainslie et al., 2023](https://arxiv.org/abs/2305.13245). Here
    `num_key_value_heads` denotes number of groups, setting
    `num_key_value_heads` to 1 is equivalent to multi-query attention, and
    when `num_key_value_heads` is equal to `num_query_heads` it is equivalent
    to multi-head attention.
    This layer first projects `query`, `key`, and `value` tensors. Then, `key`
    and `value` are repeated to match the number of heads of `query`.
    Then, the `query` is scaled and dot-producted with `key` tensors. These are
    softmaxed to obtain attention probabilities. The value tensors are then
    interpolated by these probabilities and concatenated back to a single
    tensor.
    Args:
        head_dim: Size of each attention head.
        num_query_heads: Number of query attention heads.
        num_key_value_heads: Number of key and value attention heads.
        dropout: Dropout probability.
        use_bias: Boolean, whether the dense layers use bias vectors/matrices.
        flash_attention: If `None`, the layer attempts to use flash
            attention for faster and more memory-efficient attention
            computations when possible. This behavior can be configured using
            `keras.config.enable_flash_attention()` or
            `keras.config.disable_flash_attention()`.
        kernel_initializer: Initializer for dense layer kernels.
        bias_initializer: Initializer for dense layer biases.
        kernel_regularizer: Regularizer for dense layer kernels.
        bias_regularizer: Regularizer for dense layer biases.
        activity_regularizer: Regularizer for dense layer activity.
        kernel_constraint: Constraint for dense layer kernels.
        bias_constraint: Constraint for dense layer kernels.
        seed: Optional integer to seed the dropout layer.
    Call arguments:
        query: Query tensor of shape `(batch_dim, target_seq_len, feature_dim)`,
            where `batch_dim` is batch size, `target_seq_len` is the length of
            target sequence, and `feature_dim` is dimension of feature.
        value: Value tensor of shape `(batch_dim, source_seq_len, feature_dim)`,
            where `batch_dim` is batch size, `source_seq_len` is the length of
            source sequence, and `feature_dim` is dimension of feature.
        key: Optional key tensor of shape
            `(batch_dim, source_seq_len, feature_dim)`. If not given, will use
            `value` for both `key` and `value`, which is most common case.
        attention_mask: A boolean mask of shape
            `(batch_dim, target_seq_len, source_seq_len)`, that prevents
            attention to certain positions. The boolean mask specifies which
            query elements can attend to which key elements, where 1 indicates
            attention and 0 indicates no attention. Broadcasting can happen for
            the missing batch dimensions and the head dimension.
        return_attention_scores: A boolean to indicate whether the output
            should be `(attention_output, attention_scores)` if `True`, or
            `attention_output` if `False`. Defaults to `False`.
        training: Python boolean indicating whether the layer should behave in
            training mode (adding dropout) or in inference mode (no dropout).
            Will go with either using the training mode of the parent
            layer/model or `False` (inference) if there is no parent layer.
        use_causal_mask: A boolean to indicate whether to apply a causal mask to
            prevent tokens from attending to future tokens (e.g., used in a
            decoder Transformer).
    Returns:
        attention_output: Result of the computation, of shape
            `(batch_dim, target_seq_len, feature_dim)`, where `target_seq_len`
            is for target sequence length and `feature_dim` is the query input
            last dim.
        attention_scores: (Optional) attention coefficients of shape
            `(batch_dim, num_query_heads, target_seq_len, source_seq_len)`.
    """
    def __init__(
        self,
        head_dim,
        num_query_heads,
        num_key_value_heads,
        dropout=0.0,
        use_bias=True,
        flash_attention=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.head_dim = head_dim
        self.num_query_heads = num_query_heads
        self.num_key_value_heads = num_key_value_heads
        if num_query_heads % num_key_value_heads != 0:
            raise ValueError(
                "`num_query_heads` must be divisible by `num_key_value_heads`."
            )
        # How many times each key/value head is shared across query heads.
        self.num_repeats = num_query_heads // num_key_value_heads
        self.dropout = dropout
        self.use_bias = use_bias
        # NOTE(review): because of the `or`, an explicit
        # `flash_attention=False` still falls back to the global setting —
        # confirm that a per-layer opt-out is not meant to be supported.
        self._flash_attention = flash_attention or is_flash_attention_enabled()
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.seed = seed
        # Pre-computed 1/sqrt(head_dim) scaling factor for the logits.
        self._inverse_sqrt_head_dim = 1.0 / math.sqrt(float(self.head_dim))
        self._return_attention_scores = False
        # Check for flash attention constraints
        if self._flash_attention and self.dropout > 0.0:
            raise ValueError(
                "Dropout is not supported when flash attention is enabled. "
                "Please set dropout to 0.0 to use flash attention."
            )
    def build(
        self,
        query_shape,
        value_shape,
        key_shape=None,
    ):
        """Creates the q/k/v/output projections and supporting sub-layers."""
        # Einsum variables:
        # b = batch size
        # q = query length
        # k = key/value length
        # m = model dim
        # u = num query heads
        # v = num key/value heads
        # h = head dim
        key_shape = value_shape if key_shape is None else key_shape
        self.feature_dim = query_shape[-1]
        self._query_dense = EinsumDense(
            "bqm,muh->bquh",
            output_shape=(None, self.num_query_heads, self.head_dim),
            bias_axes="uh" if self.use_bias else None,
            name="query",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._query_dense.build(query_shape)
        self._key_dense = EinsumDense(
            "bkm,mvh->bkvh",
            output_shape=(None, self.num_key_value_heads, self.head_dim),
            bias_axes="vh" if self.use_bias else None,
            name="key",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._key_dense.build(key_shape)
        self._value_dense = EinsumDense(
            "bkm,mvh->bkvh",
            output_shape=(None, self.num_key_value_heads, self.head_dim),
            bias_axes="vh" if self.use_bias else None,
            name="value",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._value_dense.build(value_shape)
        self._softmax = Softmax(axis=-1, dtype=self.dtype_policy)
        self._dropout_layer = Dropout(
            rate=self.dropout, dtype=self.dtype_policy, seed=self.seed
        )
        self._dot_product_equation = "bquh,bkuh->buqk"
        self._combine_equation = "buqk,bkuh->bquh"
        self._output_dense = EinsumDense(
            "bquh,uhm->bqm",
            output_shape=(None, self.feature_dim),
            bias_axes="m" if self.use_bias else None,
            name="attention_output",
            **self._get_common_kwargs_for_sublayer(),
        )
        self._output_dense.build(
            (None, None, self.num_query_heads, self.head_dim)
        )
    def _get_common_kwargs_for_sublayer(self):
        """Returns kwargs shared by all `EinsumDense` sub-layers."""
        common_kwargs = dict(
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            activity_regularizer=self.activity_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            dtype=self.dtype_policy,
        )
        # Create new clone of kernel/bias initializer, so that we don't reuse
        # the initializer instance, which could lead to same init value since
        # initializer is stateless.
        kernel_initializer = self.kernel_initializer.__class__.from_config(
            self.kernel_initializer.get_config()
        )
        bias_initializer = self.bias_initializer.__class__.from_config(
            self.bias_initializer.get_config()
        )
        common_kwargs["kernel_initializer"] = kernel_initializer
        common_kwargs["bias_initializer"] = bias_initializer
        return common_kwargs
    def call(
        self,
        query,
        value,
        key=None,
        query_mask=None,
        value_mask=None,
        key_mask=None,
        attention_mask=None,
        return_attention_scores=False,
        training=None,
        use_causal_mask=False,
    ):
        self._return_attention_scores = return_attention_scores
        if key is None:
            key = value
        # Masks are merged on the *unprojected* inputs, before the einsum
        # projections below.
        attention_mask = self._compute_attention_mask(
            query,
            value,
            query_mask=query_mask,
            value_mask=value_mask,
            key_mask=key_mask,
            attention_mask=attention_mask,
            use_causal_mask=use_causal_mask,
        )
        query = self._query_dense(query)
        key = self._key_dense(key)
        value = self._value_dense(value)
        # GQA: duplicate each key/value head so every query head has a
        # matching key/value head (num_repeats = query_heads // kv_heads).
        key = ops.repeat(
            key, self.num_repeats, axis=2
        )  # (batch_dim, source_seq_len, query_heads, head_dim)
        value = ops.repeat(
            value, self.num_repeats, axis=2
        )  # (batch_dim, source_seq_len, query_heads, head_dim)
        output, scores = self._compute_attention(
            query,
            key,
            value,
            attention_mask=attention_mask,
            training=training,
        )
        output = self._output_dense(
            output
        )  # (batch_dim, target_seq_len, feature_dim)
        if return_attention_scores:
            return output, scores
        return output
    def _compute_attention_mask(
        self,
        query,
        value,
        query_mask=None,
        value_mask=None,
        key_mask=None,
        attention_mask=None,
        use_causal_mask=False,
    ):
        """Computes the attention mask, using the Keras masks of the inputs.
        * The `query`'s mask is reshaped from [B, T] to [B, T, 1].
        * The `value`'s mask is reshaped from [B, S] to [B, 1, S].
        * The `key`'s mask is reshaped from [B, S] to [B, 1, S]. The `key`'s
            mask is ignored if `key` is `None` or if `key is value`.
        * If `use_causal_mask=True`, then the causal mask is computed. Its shape
            is [1, T, S].
        All defined masks are merged using a logical AND operation (`&`).
        In general, if the `query` and `value` are masked, then there is no need
        to define the `attention_mask`.
        Args:
            query: Query tensor of shape `(B, T, dim)` (pre-projection; only
                its sequence length is used, for the causal mask).
            value: Value tensor of shape `(B, S, dim)` (pre-projection; only
                its sequence length is used, for the causal mask).
            query_mask: Optional boolean mask of shape `(B, T)`.
            value_mask: Optional boolean mask of shape `(B, S)`.
            key_mask: Optional boolean mask of shape `(B, S)`.
            attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
                attention to certain positions.
            use_causal_mask: A boolean to indicate whether to apply a causal
                mask to prevent tokens from attending to future tokens (e.g.,
                used in a decoder Transformer).
        Returns:
            attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
                attention to certain positions, based on the Keras masks of the
                `query`, `key`, `value`, and `attention_mask` tensors, and the
                causal mask if `use_causal_mask=True`.
        """
        auto_mask = None
        if query_mask is not None:
            query_mask = ops.cast(query_mask, "bool")  # defensive casting
            # B = batch size, T = max query length
            auto_mask = ops.expand_dims(query_mask, -1)  # shape is [B, T, 1]
        if value_mask is not None:
            value_mask = ops.cast(value_mask, "bool")  # defensive casting
            # B = batch size, S == max value length
            mask = ops.expand_dims(value_mask, -2)  # shape is [B, 1, S]
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if key_mask is not None:
            key_mask = ops.cast(key_mask, "bool")  # defensive casting
            # B == batch size, S == max key length == max value length
            mask = ops.expand_dims(key_mask, -2)  # shape is [B, 1, S]
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if use_causal_mask:
            # the shape of the causal mask is [1, T, S]
            mask = self._compute_causal_mask(query, value)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if auto_mask is not None:
            # merge attention_mask & automatic mask, to shape [B, T, S]
            attention_mask = (
                auto_mask
                if attention_mask is None
                else ops.cast(attention_mask, bool) & auto_mask
            )
        return attention_mask
    def _compute_causal_mask(self, query, value=None):
        """Computes a causal mask (e.g., for masked self-attention layers).
        For example, if query and value both contain sequences of length 4,
        this function returns a boolean tensor equal to:
        ```
        [[[True, False, False, False],
          [True, True, False, False],
          [True, True, True, False],
          [True, True, True, True]]]
        ```
        Args:
            query: query tensor of shape `(B, T, ...)`.
            value: value tensor of shape `(B, S, ...)` (optional, defaults to
                query).
        Returns:
            mask: a boolean tensor of shape `(1, T, S)` containing a lower
                triangular matrix of shape `(T, S)`.
        """
        q_seq_length = ops.shape(query)[1]
        v_seq_length = q_seq_length if value is None else ops.shape(value)[1]
        # Row/column indices via cumsum; comparing them yields the
        # lower-triangular (causal) pattern.
        ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype="int32")
        row_index = ops.cumsum(ones_mask, axis=-2)
        col_index = ops.cumsum(ones_mask, axis=-1)
        return ops.greater_equal(row_index, col_index)
    def _compute_attention(
        self, query, key, value, attention_mask=None, training=None
    ):
        """Core attention: returns `(output, scores)`; scores are `None`
        when the fused dot-product-attention path is taken."""
        # Check for flash attention constraints
        if self._flash_attention and self._return_attention_scores:
            raise ValueError(
                "Returning attention scores is not supported when flash "
                "attention is enabled. Please disable flash attention to access"
                " attention scores."
            )
        # Determine whether to use dot-product attention
        use_dot_product_attention = not (
            self.dropout > 0.0
            or self._return_attention_scores
            or (len(query.shape) != 4)
        )
        if use_dot_product_attention:
            if attention_mask is not None:
                # Ensure attention_mask has the correct shape for broadcasting
                # Expected shape: [batch_size, num_heads, query_seq_len,
                # key_seq_len].
                mask_expansion_axis = -1 * 2 - 1
                len_attention_scores_shape = 4  # Only accepts 4D inputs
                for _ in range(
                    len_attention_scores_shape - len(attention_mask.shape)
                ):
                    attention_mask = ops.expand_dims(
                        attention_mask, axis=mask_expansion_axis
                    )
                attention_mask = ops.cast(attention_mask, dtype="bool")
            # Directly compute the attention output using dot-product attention
            attention_output = ops.dot_product_attention(
                query=query,
                key=key,
                value=value,
                bias=None,
                mask=attention_mask,
                scale=self._inverse_sqrt_head_dim,
                is_causal=False,
                flash_attention=self._flash_attention,
            )
            return attention_output, None
        # Default behavior without flash attention, with explicit attention
        # scores
        query = ops.multiply(
            query, ops.cast(self._inverse_sqrt_head_dim, query.dtype)
        )
        # Take the dot product between "query" and "key" to get the raw
        # attention scores.
        scores = ops.einsum(
            self._dot_product_equation, query, key
        )  # (batch_dim, query_heads, target_seq_len, source_seq_len)
        scores = self._masked_softmax(scores, attention_mask=attention_mask)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if self.dropout > 0.0:
            scores_dropout = self._dropout_layer(scores, training=training)
        else:
            scores_dropout = scores
        output = ops.einsum(self._combine_equation, scores_dropout, value)
        return output, scores
    def _masked_softmax(self, scores, attention_mask=None):
        # Normalize the attention scores to probabilities.
        # scores = [B, N, T, S]
        if attention_mask is not None:
            # The expand dim happens starting from the `num_heads` dimension,
            # (<batch_dims>, num_heads, <query_attention_dims,
            # key_attention_dims>)
            mask_expansion_axis = -1 * 2 - 1
            for _ in range(len(scores.shape) - len(attention_mask.shape)):
                attention_mask = ops.expand_dims(
                    attention_mask, axis=mask_expansion_axis
                )
        return self._softmax(scores, mask=attention_mask)
    def compute_output_shape(
        self,
        query_shape,
        value_shape,
        key_shape=None,
    ):
        """Output shape equals `query_shape`; also validates input shapes."""
        if key_shape is None:
            key_shape = value_shape
        if query_shape[-1] != value_shape[-1]:
            raise ValueError(
                "The last dimension of `query_shape` and `value_shape` "
                f"must be equal, but are {query_shape[-1]}, {value_shape[-1]}. "
                f"Received: query_shape={query_shape}, "
                f"value_shape={value_shape}"
            )
        if value_shape[1:-1] != key_shape[1:-1]:
            raise ValueError(
                "All dimensions of `value` and `key`, except the last one, "
                f"must be equal. Received: value_shape={value_shape} and "
                f"key_shape={key_shape}"
            )
        return query_shape
    def get_config(self):
        # NOTE(review): `flash_attention` is not serialized here, so a
        # reloaded layer falls back to the global flash-attention setting —
        # confirm this matches the intended serialization contract.
        config = {
            "head_dim": self.head_dim,
            "num_query_heads": self.num_query_heads,
            "num_key_value_heads": self.num_key_value_heads,
            "use_bias": self.use_bias,
            "dropout": self.dropout,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/__init__.py | keras/src/layers/attention/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/additive_attention.py | keras/src/layers/attention/additive_attention.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none
supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, it `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/attention.py | keras/src/layers/attention/attention.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Attention")
class Attention(Layer):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none
supplied, `value` will be used as a `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
seed: A Python integer to use as random seed in case of `dropout`.
score_mode: Function to use to compute attention scores, one of
`{"dot", "concat"}`. `"dot"` refers to the dot product between the
query and key vectors. `"concat"` refers to the hyperbolic tangent
of the concatenation of the `query` and `key` vectors.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, it `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=False,
score_mode="dot",
dropout=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.use_scale = use_scale
self.score_mode = score_mode
self.dropout = dropout
if self.dropout > 0:
self.seed_generator = backend.random.SeedGenerator(seed=seed)
if self.score_mode not in ["dot", "concat"]:
raise ValueError(
"Invalid value for argument score_mode. "
"Expected one of {'dot', 'concat'}. "
f"Received: score_mode={score_mode}"
)
self._return_attention_scores = False
def build(self, input_shape):
self._validate_inputs(input_shape)
self.scale = None
self.concat_score_weight = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
if self.score_mode == "concat":
self.concat_score_weight = self.add_weight(
name="concat_score_weight",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
if self.score_mode == "dot":
scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))
if self.scale is not None:
scores = ops.multiply(scores, self.scale)
elif self.score_mode == "concat":
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
if self.scale is not None:
scores = self.concat_score_weight * ops.sum(
ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1
)
else:
scores = self.concat_score_weight * ops.sum(
ops.tanh(q_reshaped + k_reshaped), axis=-1
)
else:
raise ValueError("scores not computed")
return scores
def _apply_scores(self, scores, value, scores_mask=None, training=False):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `(batch_size, Tq)` and `key` tensor of
shape `(batch_size, Tv)` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates
`attention_distribution = softmax(scores)`, then returns
`matmul(attention_distribution, value).
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `(batch_size, Tq, Tv)`.
value: Value tensor of shape `(batch_size, Tv, dim)`.
scores_mask: A boolean mask tensor of shape `(batch_size, 1, Tv)`
or `(batch_size, Tq, Tv)`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must
contain at least one `True` value in each line along the last
dimension.
training: Python boolean indicating whether the layer should behave
in training mode (adding dropout) or in inference mode
(no dropout).
Returns:
Tensor of shape `(batch_size, Tq, dim)`.
Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
if scores_mask is not None:
padding_mask = ops.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention
# distribution. Note 65504. is the max float16 value.
max_value = 65504.0 if scores.dtype == "float16" else 1.0e9
if len(padding_mask.shape) == 2:
padding_mask = ops.expand_dims(padding_mask, axis=-2)
scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)
weights = ops.softmax(scores, axis=-1)
if training and self.dropout > 0:
weights = backend.random.dropout(
weights,
self.dropout,
seed=self.seed_generator,
)
return ops.matmul(weights, value), weights
def _calculate_score_mask(self, scores, v_mask, use_causal_mask):
if use_causal_mask:
# Creates a lower triangular mask, so position i cannot attend to
# positions j > i. This prevents the flow of information from the
# future into the past.
score_shape = ops.shape(scores)
# causal_mask_shape = [1, Tq, Tv].
mask_shape = (1, score_shape[-2], score_shape[-1])
ones_mask = ops.ones(shape=mask_shape, dtype="int32")
row_index = ops.cumsum(ones_mask, axis=-2)
col_index = ops.cumsum(ones_mask, axis=-1)
causal_mask = ops.greater_equal(row_index, col_index)
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = ops.expand_dims(v_mask, axis=-2)
return ops.logical_and(v_mask, causal_mask)
return causal_mask
else:
# If not using causal mask, return the value mask as is,
# or None if the value mask is not provided.
return v_mask
def call(
self,
inputs,
mask=None,
training=False,
return_attention_scores=False,
use_causal_mask=False,
):
self._validate_inputs(inputs=inputs, mask=mask)
self._return_attention_scores = return_attention_scores
q = inputs[0]
v = inputs[1]
k = inputs[2] if len(inputs) > 2 else v
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
scores = self._calculate_scores(query=q, key=k)
scores_mask = self._calculate_score_mask(
scores, v_mask, use_causal_mask
)
attention_output, attention_scores = self._apply_scores(
scores=scores, value=v, scores_mask=scores_mask, training=training
)
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = ops.expand_dims(q_mask, axis=-1)
attention_output *= ops.cast(q_mask, dtype=attention_output.dtype)
if return_attention_scores:
return (attention_output, attention_scores)
else:
return attention_output
def compute_mask(self, inputs, mask=None):
self._validate_inputs(inputs=inputs, mask=mask)
if mask is None or mask[0] is None:
return None
return ops.convert_to_tensor(mask[0])
def compute_output_shape(self, input_shape):
query_shape, value_shape, key_shape = input_shape
if key_shape is None:
key_shape = value_shape
output_shape = (*query_shape[:-1], value_shape[-1])
if self._return_attention_scores:
scores_shape = (query_shape[0], query_shape[1], key_shape[1])
return output_shape, scores_shape
return output_shape
def compute_output_spec(
self,
inputs,
mask=None,
return_attention_scores=False,
training=None,
use_causal_mask=False,
):
# Validate and unpack inputs
self._validate_inputs(inputs, mask)
query = inputs[0]
value = inputs[1]
key = inputs[2] if len(inputs) > 2 else value
# Compute primary output shape
output_shape = self.compute_output_shape(
[query.shape, value.shape, key.shape]
)
output_spec = KerasTensor(output_shape, dtype=self.compute_dtype)
# Handle attention scores if requested
if self._return_attention_scores or return_attention_scores:
scores_shape = (
query.shape[0],
query.shape[1],
key.shape[1],
) # (batch_size, Tq, Tv)
attention_scores_spec = KerasTensor(
scores_shape, dtype=self.compute_dtype
)
return (output_spec, attention_scores_spec)
return output_spec
def _validate_inputs(self, inputs, mask=None):
"""Validates arguments of the call method."""
class_name = self.__class__.__name__
if not isinstance(inputs, list):
raise ValueError(
f"{class_name} layer must be called on a list of inputs, "
"namely [query, value] or [query, value, key]. "
f"Received: inputs={inputs}."
)
if len(inputs) < 2 or len(inputs) > 3:
raise ValueError(
f"{class_name} layer accepts inputs list of length 2 or 3, "
"namely [query, value] or [query, value, key]. "
f"Received length: {len(inputs)}."
)
if mask is not None:
if not isinstance(mask, list):
raise ValueError(
f"{class_name} layer mask must be a list, "
f"namely [query_mask, value_mask]. Received: mask={mask}."
)
if len(mask) < 2 or len(mask) > 3:
raise ValueError(
f"{class_name} layer accepts mask list of length 2 or 3. "
f"Received: inputs={inputs}, mask={mask}."
)
def get_config(self):
base_config = super().get_config()
config = {
"use_scale": self.use_scale,
"score_mode": self.score_mode,
"dropout": self.dropout,
}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/attention/multi_head_attention_test.py | keras/src/layers/attention/multi_head_attention_test.py | import os
import warnings
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import constraints
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import random
from keras.src import saving
from keras.src import testing
from keras.src.backend.config import disable_flash_attention
from keras.src.backend.config import enable_flash_attention
from keras.src.backend.config import is_flash_attention_enabled
class MultiHeadAttentionTest(testing.TestCase):
def setUp(self):
super().setUp()
# Flash attention is a newly introduced feature. We need to disable it
# for testing purposes.
disable_flash_attention()
def tearDown(self):
enable_flash_attention()
return super().tearDown()
def test_basics(self):
self.assertFalse(is_flash_attention_enabled())
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
"value_dim": 4,
"use_bias": False,
"dropout": 0.5,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=4,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_basics_with_flash_attention(self):
enable_flash_attention()
if backend.backend() in ("tensorflow", "numpy"):
self.skipTest(
"Flash attention is not supported in tensorflow and numpy "
"backends."
)
elif backend.backend() == "torch":
try:
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 8,
"dtype": "float16",
},
input_shape={
"query_shape": (2, 8, 16),
"value_shape": (2, 4, 16),
},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
except ImportError as e:
if "Flash attention is not supported" in str(e.args[0]):
self.assertTrue(
(
"Flash attention is not supported in your current "
"PyTorch version."
)
in str(e.args[0])
)
except RuntimeError as e:
if (
"Flash attention is not supported with the provided inputs"
in str(e.args[0])
):
self.assertTrue(
(
"Flash attention is not supported with the "
"provided inputs"
)
in str(e.args[0])
)
elif backend.backend() == "jax":
try:
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 8,
"dtype": "float16",
},
input_shape={
"query_shape": (2, 8, 16),
"value_shape": (2, 4, 16),
},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
except ImportError as e:
if "Flash attention is not supported" in str(e.args[0]):
self.assertTrue(
(
"Flash attention is not supported in your current "
"JAX version."
)
in str(e.args[0])
)
except RuntimeError as e:
if "cuDNN" in str(e.args[0]):
self.assertTrue("cuDNN is not detected." in str(e.args[0]))
elif "Require at least" in str(e.args[0]):
self.assertTrue(
"Require at least Ampere arch to run" in str(e.args[0])
)
elif "Flash attention" in str(e.args[0]):
self.assertTrue(
(
"Flash attention is not supported in your current "
"JAX version."
)
in str(e.args[0])
)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", (3, 4), (3, 2), (4, 2), (2,)),
("4d_inputs_1freebatch_mask3", (3, 4), (3, 2), (3, 4, 2), (2,)),
("4d_inputs_1freebatch_mask4", (3, 4), (3, 2), (3, 2, 4, 2), (2,)),
("4d_inputs_2d_attention", (3, 4), (3, 2), (3, 4, 3, 2), (1, 2)),
("5d_inputs_2d_attention", (5, 3, 4), (5, 3, 2), (3, 4, 3, 2), (2, 3)),
(
"5d_inputs_2d_attention_fullmask",
(5, 3, 4),
(5, 3, 2),
(5, 3, 4, 3, 2),
(2, 3),
),
)
def test_high_dim_attention(
self, q_dims, v_dims, mask_dims, attention_axes
):
batch_size, hidden_size = 3, 8
query_shape = (batch_size,) + q_dims + (hidden_size,)
value_shape = (batch_size,) + v_dims + (hidden_size,)
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
"attention_axes": attention_axes,
},
input_shape={
"query_shape": query_shape,
"value_shape": value_shape,
},
expected_output_shape=query_shape,
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_attention_axes_negative_indexing(self):
x = np.random.normal(size=(2, 3, 8, 4))
# Create two layers with equivalent positive and negative indices
mha_pos = layers.MultiHeadAttention(
num_heads=2, key_dim=4, attention_axes=2
)
mha_neg = layers.MultiHeadAttention(
num_heads=2, key_dim=4, attention_axes=-2
)
# Initialize both layers
_ = mha_pos(x, x)
_ = mha_neg(x, x)
# Set same weights for fair comparison
mha_neg.set_weights(mha_pos.get_weights())
# Get outputs and attention scores
z_pos, a_pos = mha_pos(x, x, return_attention_scores=True)
z_neg, a_neg = mha_neg(x, x, return_attention_scores=True)
# Verify shapes match
self.assertEqual(z_pos.shape, z_neg.shape)
self.assertEqual(a_pos.shape, a_neg.shape)
# Verify outputs are identical
self.assertAllClose(z_pos, z_neg, rtol=1e-5, atol=1e-5)
self.assertAllClose(a_pos, a_neg, rtol=1e-5, atol=1e-5)
@parameterized.named_parameters(
("without_key_same_proj", (4, 8), (2, 8), None, None),
("with_key_same_proj", (4, 8), (2, 8), (2, 3), None),
("without_key_different_proj", (4, 8), (2, 8), None, (3, 4)),
("with_key_different_proj", (4, 8), (2, 8), (2, 3), (1, 5)),
("high_dim_same_proj", (4, 2, 3, 8), (1, 1, 5, 8), (1, 1, 5, 2), None),
(
"high_dim_different_proj",
(4, 2, 3, 8),
(1, 1, 5, 8),
(1, 1, 5, 2),
(3, 2),
),
(
"different_qv_last_dims",
(4, 2, 3, 8),
(4, 2, 3, 7),
(4, 2, 3, 8),
None,
),
)
def test_compute_output_shape(
self, query_dims, value_dims, key_dims, output_shape
):
"""Test computed shape is equal to the layer output's shape."""
layer = layers.MultiHeadAttention(
num_heads=2,
key_dim=2,
value_dim=2,
output_shape=output_shape,
)
batch_size = 7
query_shape = (batch_size,) + query_dims
value_shape = (batch_size,) + value_dims
key_shape = (batch_size,) + key_dims if key_dims else None
query = np.ones(query_shape)
value = np.ones(value_shape)
key = np.ones(key_shape) if key_shape else None
output = layer(query=query, value=value, key=key)
comp_output_shape = layer.compute_output_shape(
query_shape, value_shape, key_shape
)
self.assertEqual(output.shape, comp_output_shape)
# Test shapes as lists.
comp_output_shape = layer.compute_output_shape(
list(query_shape),
list(value_shape),
list(key_shape) if key_shape is not None else None,
)
self.assertEqual(output.shape, comp_output_shape)
@parameterized.named_parameters(
("query_value_dim_mismatch", (2, 4, 8), (2, 2, 7), (2,)),
("key_value_dim_mismatch", (2, 4, 8), (2, 2, 8), (2, 1, 7)),
(
"key_value_dim_mismatch_high_dim",
(2, 4, 2, 3, 8),
(2, 1, 1, 5, 8),
(2, 1, 15, 5, 2),
),
)
def test_shape_mismatch_error(self, query_shape, value_shape, key_shape):
"""Test dimension mismatches"""
layer = layers.MultiHeadAttention(
num_heads=4,
key_dim=2,
value_dim=2,
)
with self.assertRaisesRegex(ValueError, r"must be equal"):
layer.compute_output_shape(query_shape, value_shape, key_shape)
with self.assertRaisesRegex(ValueError, r"must be equal"):
layer(
np.ones(query_shape), np.ones(value_shape), np.ones(key_shape)
)
def test_initializer(self):
# Test with a specified initializer.
layer = layers.MultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=initializers.TruncatedNormal(stddev=0.02),
)
layer.build((2, 4, 8), (2, 4, 8))
# Make sure the sub layers have different kernel init value.
self.assertNotAllClose(
layer._query_dense.kernel,
layer._key_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._value_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._output_dense.kernel,
)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_query_mask_propagation(self):
"""Test automatic propagation of the query's mask."""
try:
layer = layers.MultiHeadAttention(num_heads=2, key_dim=2)
self.assertTrue(layer.supports_masking)
query = np.array(
[[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]]
)
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
query_mask = backend.get_keras_mask(masked_query)
value = np.random.normal(size=(3, 3, 8))
output = layer(query=masked_query, value=value)
except RuntimeError as e:
if e.args[0].startswith(
"(*bias): last dimension must be contiguous"
):
self.skipTest(
"PyTorch errors out on GPU: issue to track bug is here "
"https://github.com/keras-team/keras/issues/20459"
)
self.assertAllClose(query_mask, output._keras_mask)
@parameterized.named_parameters(("causal", True), ("not_causal", 0))
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_masking(self, use_causal_mask):
"""Test that the value and causal masks are taken into account."""
layer = layers.MultiHeadAttention(num_heads=2, key_dim=2)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]])
masked_value = layers.Embedding(6, 8, mask_zero=True)(value)
output = layer(
query=masked_query,
value=masked_value,
use_causal_mask=use_causal_mask,
)
mask = np.array(
[[[1, 1, 0]] * 3 + [[0, 0, 0]] * 2]
+ [[[1, 0, 0]] * 5]
+ [[[1, 1, 1]] + [[0, 0, 0]] * 4]
)
if use_causal_mask:
mask = mask & np.array([[[1, 0, 0], [1, 1, 0]] + [[1, 1, 1]] * 3])
del masked_query._keras_mask
del masked_value._keras_mask
output_with_manual_mask = layer(
query=masked_query, value=masked_value, attention_mask=mask
)
self.assertAllClose(output, output_with_manual_mask)
def test_masking_with_different_shapes(self):
x = random.uniform(shape=(2, 5, 8))
mask = ops.tril(ops.ones((5, 5))) # (5, 5)
layer = layers.MultiHeadAttention(num_heads=2, key_dim=4)
output_1 = layer(query=x, value=x, attention_mask=mask)
mask = ops.tile(mask[None, ...], (2, 1, 1)) # (2, 5, 5)
output_2 = layer(query=x, value=x, attention_mask=mask)
mask = ops.tile(mask[:, None, ...], (1, 2, 1, 1)) # (2, 2, 5, 5)
output_3 = layer(query=x, value=x, attention_mask=mask)
self.assertAllClose(output_1, output_2)
self.assertAllClose(output_1, output_3)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_no_warning_with_keras_mask(self):
layer = layers.MultiHeadAttention(num_heads=2, key_dim=2)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]])
masked_value = layers.Embedding(6, 8, mask_zero=True)(value)
with warnings.catch_warnings(record=True) as warning_logs:
_ = layer(query=masked_query, value=masked_value)
self.assertLen(warning_logs, 0)
@parameterized.named_parameters(
("disable_flash_attention", False), ("enable_flash_attention", True)
)
def test_correctness(self, flash_attention):
if flash_attention:
# Let the backend decide whether to use flash attention
enable_flash_attention()
dtype = "float16" # Flash attention only accepts float16/bfloat16
num_heads = 8
key_dim = 8 # key_dim % 8 == 0 to enable flash attention
query = np.identity(key_dim)[np.newaxis, ...]
key = np.identity(key_dim)[np.newaxis, ...]
value = (
np.reshape(np.arange(key_dim * key_dim), (1, key_dim, key_dim))
/ 100.0 # Prevent overflow/underflow
)
# Setup layer.
layer = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=key_dim, dtype=dtype
)
layer.build(query.shape, key.shape, value.shape)
# Set layer weights.
kernel = np.identity(key_dim)
# To get an identity kernel we need to add a head dim and repeat on it.
kernel = np.repeat(kernel[:, np.newaxis, :], num_heads, axis=1)
# Zeros for all biases.
bias = np.zeros((num_heads, key_dim))
output_bias = np.zeros((key_dim,))
layer.set_weights([kernel, bias] * 3 + [kernel, output_bias])
# Call layer and assert output.
expected_output = np.array(
[2.406, 2.440, 2.473, 2.504, 2.535, 2.568, 2.602, 2.633]
)
expected_output = np.tile(
expected_output[np.newaxis, :, np.newaxis], (1, 1, key_dim)
)
expected_score = np.array(
[
[0.1187] * 0 + [0.1691] + [0.1187] * 7,
[0.1187] * 1 + [0.1691] + [0.1187] * 6,
[0.1187] * 2 + [0.1691] + [0.1187] * 5,
[0.1187] * 3 + [0.1691] + [0.1187] * 4,
[0.1187] * 4 + [0.1691] + [0.1187] * 3,
[0.1187] * 5 + [0.1691] + [0.1187] * 2,
[0.1187] * 6 + [0.1691] + [0.1187] * 1,
[0.1187] * 7 + [0.1691] + [0.1187] * 0,
]
)
expected_score = np.tile(
expected_score[np.newaxis, np.newaxis, ...], (1, key_dim, 1, 1)
)
if flash_attention:
output = layer(query=query, value=value, key=key)
self.assertAllClose(output, expected_output, atol=1e-2)
else:
output, scores = layer(
query=query,
value=value,
key=key,
return_attention_scores=True,
)
self.assertAllClose(output, expected_output, atol=1e-2)
self.assertAllClose(scores, expected_score, atol=1e-2)
def test_mha_constraints(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
num_heads = 2
key_dim = 2
layer = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=key_dim,
kernel_constraint="non_neg",
)
layer.build(query.shape, key.shape, value.shape)
self.assertIsInstance(
layer._query_dense.kernel.constraint, constraints.NonNeg
)
self.assertIsInstance(
layer._value_dense.kernel.constraint, constraints.NonNeg
)
self.assertIsInstance(
layer._key_dense.kernel.constraint, constraints.NonNeg
)
layer = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=key_dim,
bias_constraint="non_neg",
)
layer.build(query.shape, key.shape, value.shape)
self.assertIsInstance(
layer._query_dense.bias.constraint, constraints.NonNeg
)
self.assertIsInstance(
layer._value_dense.bias.constraint, constraints.NonNeg
)
self.assertIsInstance(
layer._key_dense.bias.constraint, constraints.NonNeg
)
@pytest.mark.requires_trainable_backend
def test_lora(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
layer = layers.MultiHeadAttention(
num_heads=3,
key_dim=8,
use_bias=False,
)
layer.build(query.shape, key.shape, value.shape)
layer.query_dense.enable_lora(2)
layer.key_dense.enable_lora(2)
layer.value_dense.enable_lora(2)
self.assertLen(layer.trainable_variables, 7)
self.assertLen(layer.non_trainable_variables, 3)
# Try eager call
x = {
"query": query,
"key": key,
"value": value,
}
y = np.random.random((1, 2, 2))
_ = layer(**x)
# Try calling fit()
inputs = {
"query": layers.Input((2, 2)),
"key": layers.Input((2, 2)),
"value": layers.Input((2, 2)),
}
outputs = layer(inputs["query"], inputs["key"], inputs["value"])
model = models.Model(inputs, outputs)
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y)
# Try saving and reloading the model
temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try saving and reloading the model's weights only
temp_filepath = os.path.join(
self.get_temp_dir(), "lora_model.weights.h5"
)
model.save_weights(temp_filepath)
# Load the file into a fresh, non-lora model
inputs = {
"query": layers.Input((2, 2)),
"key": layers.Input((2, 2)),
"value": layers.Input((2, 2)),
}
outputs = layers.MultiHeadAttention(
num_heads=3,
key_dim=8,
use_bias=False,
)(inputs["query"], inputs["key"], inputs["value"])
new_model = models.Model(inputs, outputs)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try loading a normal checkpoint into a lora model
new_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.parameters([((1, 2, 3),), ((2, 3, 5),)])
def test_symbolic_return_attention_scores(self, shape):
mha = layers.MultiHeadAttention(num_heads=4, key_dim=2)
x = layers.Input(batch_shape=shape)
y = layers.Input(batch_shape=shape)
symbolic_out = mha(x, y, return_attention_scores=True)
self.assertLen(symbolic_out, 2)
x = np.random.random(shape)
y = np.random.random(shape)
out = mha(x, y, return_attention_scores=True)
self.assertLen(out, 2)
self.assertEqual(symbolic_out[0].shape, out[0].shape)
self.assertEqual(symbolic_out[1].shape, out[1].shape)
def test_dtype_policy_map(self):
quantized_policy = dtype_policies.QuantizedDTypePolicy(
"int8", "float32"
)
policy_map = dtype_policies.DTypePolicyMap()
# Preset the quantized policy
policy_map["mha/query"] = quantized_policy
policy_map["mha/key"] = quantized_policy
policy_map["mha/value"] = quantized_policy
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
layer = layers.MultiHeadAttention(
num_heads=3, key_dim=8, use_bias=False, dtype=policy_map, name="mha"
)
layer.build(query.shape, key.shape, value.shape)
# Sublayers should be quantized
self.assertDType(layer._query_dense._kernel, "int8")
self.assertDType(layer._key_dense._kernel, "int8")
self.assertDType(layer._value_dense._kernel, "int8")
def test_flash_attention_with_errors(self):
    """Flash attention rejects incompatible options with clear errors.

    Two incompatibilities are checked: `dropout > 0` at construction
    time, and `return_attention_scores=True` at call time.
    """
    # Flash attention is only exercised on backends that support it.
    if backend.backend() in ("numpy", "tensorflow"):
        pytest.skip(
            reason=(
                "Flash attention is not supported on tensorflow and numpy."
            )
        )
    # Check `flash_attention=True` and `dropout=0.1`: must fail at
    # construction time.
    with self.assertRaisesRegex(
        ValueError,
        "Dropout is not supported when flash attention is enabled.",
    ):
        layer = layers.MultiHeadAttention(
            num_heads=2, key_dim=2, flash_attention=True, dropout=0.1
        )
    # Check `flash_attention=True` and `return_attention_scores=True`:
    # construction succeeds, the call must fail.
    layer = layers.MultiHeadAttention(
        num_heads=2, key_dim=2, flash_attention=True
    )
    self.assertTrue(layer._flash_attention)
    query = np.random.random((2, 4, 8))
    value = np.random.random((2, 4, 8))
    with self.assertRaisesRegex(
        ValueError,
        "Returning attention scores is not supported when flash "
        "attention is enabled. Please disable flash attention to access"
        " attention scores.",
    ):
        layer(query=query, value=value, return_attention_scores=True)
def test_multi_head_attention_output_shape_as_int(self):
    """Test MultiHeadAttention with output_shape as an int.

    An int `output_shape` replaces the last output dimension.
    """
    mha = layers.MultiHeadAttention(num_heads=2, key_dim=16, output_shape=8)
    # NOTE(review): `random` here is presumably `keras.src.random`
    # (import not visible in this chunk) — confirm.
    query = random.uniform((2, 4, 16))
    value = random.uniform((2, 4, 16))
    output = mha(query=query, value=value)
    # Use the framework assertion rather than a plain `assert`: bare
    # asserts are stripped under `python -O` and are inconsistent with
    # the `self.assert*` style used throughout this file.
    self.assertEqual(
        output.shape,
        (2, 4, 8),
        f"Expected shape (2, 4, 8), got {output.shape}",
    )
def test_multi_head_attention_output_shape_as_tuple(self):
    """Test MultiHeadAttention with output_shape as a tuple.

    A tuple `output_shape` replaces the last output dimension with
    the whole tuple.
    """
    mha = layers.MultiHeadAttention(
        num_heads=2, key_dim=16, output_shape=(8, 8)
    )
    # NOTE(review): `random` here is presumably `keras.src.random`
    # (import not visible in this chunk) — confirm.
    query = random.uniform((2, 4, 16))
    value = random.uniform((2, 4, 16))
    output = mha(query=query, value=value)
    # Use the framework assertion rather than a plain `assert`: bare
    # asserts are stripped under `python -O` and are inconsistent with
    # the `self.assert*` style used throughout this file.
    self.assertEqual(
        output.shape,
        (2, 4, 8, 8),
        f"Expected shape (2, 4, 8, 8), got {output.shape}",
    )
def test_multi_head_attention_output_shape_error(self):
    """A non-int, non-tuple `output_shape` (e.g. a float) is rejected."""
    with self.assertRaisesRegex(ValueError, r"Invalid `output_shape`"):
        layers.MultiHeadAttention(num_heads=2, key_dim=16, output_shape=8.0)
def test_quantize_int8(self):
    """int8 quantization keeps MHA outputs close to the float outputs.

    Builds and runs the layer in float, quantizes every sublayer that
    supports it, then checks the kernels are int8 and the quantized
    output stays within a loose MSE tolerance of the float output.
    """
    query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
    key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
    value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
    layer = layers.MultiHeadAttention(
        num_heads=3,
        key_dim=8,
        use_bias=False,
    )
    layer.build(query.shape, value.shape, key.shape)
    output_float = layer(query, key, value)
    # Best-effort quantization: sublayers that don't support int8
    # raise and are skipped. A bare `except:` (as before) would also
    # swallow KeyboardInterrupt/SystemExit; `except Exception` is the
    # safe form for this deliberate best-effort pattern.
    for sublayer in layer._flatten_layers():
        try:
            sublayer.quantize("int8")
        except Exception:
            pass
    # Verify weights dtype.
    self.assertDType(layer._query_dense._kernel, "int8")
    self.assertDType(layer._key_dense._kernel, "int8")
    self.assertDType(layer._value_dense._kernel, "int8")
    self.assertDType(layer._output_dense._kernel, "int8")
    # Try eager call and verify output correctness.
    output_quantized = layer(query, key, value)
    mse = ops.mean(ops.square(output_float - output_quantized))
    self.assertLess(mse, 1e-3)  # A weak correctness test
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/concatenate.py | keras/src/layers/merging/concatenate.py | import copy
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Concatenate")
class Concatenate(Merge):
    """Concatenates a list of inputs.

    It takes as input a list of tensors, all of the same shape except
    for the concatenation axis, and returns a single tensor that is the
    concatenation of all inputs.

    Examples:

    >>> x = np.arange(20).reshape(2, 2, 5)
    >>> y = np.arange(20, 30).reshape(2, 1, 5)
    >>> keras.layers.Concatenate(axis=1)([x, y])

    Usage in a Keras model:

    >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
    >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
    >>> y = keras.layers.Concatenate()([x1, x2])

    Args:
        axis: Axis along which to concatenate.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the concatenation of the inputs alongside axis `axis`.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.supports_masking = True
        self._reshape_required = False

    def build(self, input_shape):
        # Used purely for shape validation: all inputs must agree on
        # every axis except the concatenation axis.
        if len(input_shape) < 1 or not isinstance(
            input_shape[0], (tuple, list)
        ):
            raise ValueError(
                "A `Concatenate` layer should be called on a list of "
                f"at least 1 input. Received: input_shape={input_shape}"
            )
        if all(shape is None for shape in input_shape):
            return
        reduced_inputs_shapes = [list(shape) for shape in input_shape]
        reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)
        shape_set = set()
        for i in range(len(reduced_inputs_shapes_copy)):
            # Convert self.axis to positive axis for each input
            # in case self.axis is a negative number.
            concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])
            # BUG FIX: the loop below previously enumerated the outer
            # list of shapes (`reduced_inputs_shapes_copy`) instead of
            # the axes of input `i`, so `axis_value` was a whole shape
            # list and the squeezable-axis pruning never triggered.
            # Iterate over a slice (a snapshot) so the deletions below
            # don't disturb the iteration. `start=1` skips the batch
            # axis.
            for axis, axis_value in enumerate(
                reduced_inputs_shapes_copy[i][1:], start=1
            ):
                # Remove squeezable axes (axes with value of 1)
                # if not in the axis that will be used for concatenation
                # otherwise leave it.
                # This approach allows building the layer,
                # but if tensor shapes are not the same when
                # calling, an exception will be raised.
                if axis != concat_axis and axis_value == 1:
                    del reduced_inputs_shapes[i][axis]
            # Drop the concatenation axis itself before comparing.
            if len(reduced_inputs_shapes[i]) > self.axis:
                del reduced_inputs_shapes[i][self.axis]
            shape_set.add(tuple(reduced_inputs_shapes[i]))
        if len(shape_set) != 1:
            err_msg = (
                "A `Concatenate` layer requires inputs with matching shapes "
                "except for the concatenation axis. "
                f"Received: input_shape={input_shape}"
            )
            # Make sure all the shapes have same ranks.
            ranks = set(len(shape) for shape in shape_set)
            if len(ranks) != 1:
                raise ValueError(err_msg)
            # Get the only rank for the set.
            (rank,) = ranks
            for axis in range(rank):
                # Skip the Nones in the shape since they are dynamic, also
                # the axis for concat has been removed above.
                unique_dims = set(
                    shape[axis]
                    for shape in shape_set
                    if shape[axis] is not None
                )
                if len(unique_dims) > 1:
                    raise ValueError(err_msg)

    def _merge_function(self, inputs):
        return ops.concatenate(inputs, axis=self.axis)

    def compute_output_shape(self, input_shape):
        if (not isinstance(input_shape, (tuple, list))) or (
            not isinstance(input_shape[0], (tuple, list))
        ):
            raise ValueError(
                "A `Concatenate` layer should be called on a list of inputs. "
                f"Received: input_shape={input_shape}"
            )
        input_shapes = input_shape
        output_shape = list(input_shapes[0])
        # Sum sizes along the concat axis; any dynamic (None) size makes
        # the whole axis dynamic.
        for shape in input_shapes[1:]:
            if output_shape[self.axis] is None or shape[self.axis] is None:
                output_shape[self.axis] = None
                break
            output_shape[self.axis] += shape[self.axis]
        return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        if not isinstance(mask, (tuple, list)):
            raise ValueError(f"`mask` should be a list. Received mask={mask}")
        if not isinstance(inputs, (tuple, list)):
            raise ValueError(
                f"`inputs` should be a list. Received: inputs={inputs}"
            )
        if len(mask) != len(inputs):
            raise ValueError(
                "The lists `inputs` and `mask` should have the same length. "
                f"Received: inputs={inputs} of length {len(inputs)}, and "
                f"mask={mask} of length {len(mask)}"
            )
        if all(m is None for m in mask):
            return None
        # Make a list of masks while making sure
        # the dimensionality of each mask
        # is the same as the corresponding input.
        masks = []
        for input_i, mask_i in zip(inputs, mask):
            if mask_i is None:
                # Input is unmasked. Append all 1s to masks.
                masks.append(ops.ones_like(input_i, dtype="bool"))
            elif mask_i.ndim < input_i.ndim:
                # Broadcast mask shape to match in a way where we capture
                # the input as a symbolic input in the op graph.
                mask_i = ops.logical_or(
                    ops.expand_dims(mask_i, axis=-1),
                    ops.zeros_like(input_i, dtype="bool"),
                )
                masks.append(mask_i)
            else:
                masks.append(mask_i)
        concatenated = ops.concatenate(masks, axis=self.axis)
        return ops.any(concatenated, axis=-1, keepdims=False)

    def get_config(self):
        config = {"axis": self.axis}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.layers.concatenate")
def concatenate(inputs, axis=-1, **kwargs):
    """Functional interface to the `Concatenate` layer.

    Args:
        inputs: A list of input tensors.
        axis: Concatenation axis.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the concatenation of the inputs alongside axis `axis`.
    """
    layer = Concatenate(axis=axis, **kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/dot.py | keras/src/layers/merging/dot.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
from keras.src.utils.numerical_utils import normalize
def batch_dot(x, y, axes=None):
    """Batchwise dot product.

    `batch_dot` is used to compute dot product of `x` and `y` when
    `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`.
    `batch_dot` results in a tensor or variable with less dimensions
    than the input. If the number of dimensions is reduced to 1,
    we use `expand_dims` to make sure that ndim is at least 2.

    Shape inference:
    Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
    If `axes` is (1, 2), to find the output shape of resultant tensor,
    loop through each dimension in `x`'s shape and `y`'s shape:
    * `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape, dimension 1 of
      `x` has been summed over. (`dot_axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape, always ignore
      first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape, dimension 2 of
      `y` has been summed over.
      (`dot_axes[1]` = 2) `output_shape` = `(100, 30)`

    Example:

    >>> x_batch = np.ones(shape=(32, 20, 1))
    >>> y_batch = np.ones(shape=(32, 30, 20))
    >>> xy_batch_dot = batch_dot(x_batch, y_batch, axes=(1, 2))

    Args:
        x: Keras tensor or variable with `ndim >= 2`.
        y: Keras tensor or variable with `ndim >= 2`.
        axes: Tuple or list of integers with target dimensions, or single
            integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]`
            should be equal.
            Note that axis `0` (the batch axis) cannot be included.

    Returns:
        A tensor with shape equal to the concatenation of `x`'s shape
        (less the dimension that was summed over) and `y`'s shape (less the
        batch dimension and the dimension that was summed over). If the final
        rank is 1, we reshape it to `(batch_size, 1)`.
    """
    x_shape = x.shape
    y_shape = y.shape
    x_ndim = len(x_shape)
    y_ndim = len(y_shape)
    if x_ndim < 2 or y_ndim < 2:
        raise ValueError(
            f"Cannot do batch_dot on inputs "
            f"with rank < 2. "
            f"Received inputs with shapes "
            f"{x_shape} and {y_shape}."
        )
    # Static batch sizes, when known, must agree.
    x_batch_size = x_shape[0]
    y_batch_size = y_shape[0]
    if x_batch_size is not None and y_batch_size is not None:
        if x_batch_size != y_batch_size:
            raise ValueError(
                f"Cannot do batch_dot on inputs "
                f"with different batch sizes. "
                f"Received inputs with shapes "
                f"{x_shape} and {y_shape}."
            )
    # Normalize `axes` to a two-element list [axis_of_x, axis_of_y].
    if isinstance(axes, int):
        axes = [axes, axes]
    if axes is None:
        if y_ndim == 2:
            axes = [x_ndim - 1, y_ndim - 1]
        else:
            axes = [x_ndim - 1, y_ndim - 2]
    if any(isinstance(a, (list, tuple)) for a in axes):
        raise ValueError(
            f"Multiple target dimensions are not supported. "
            f"Expected: None, int, (int, int), "
            f"Provided: {axes} "
        )
    # if tuple, convert to list.
    axes = list(axes)
    # convert negative indices.
    if axes[0] < 0:
        axes[0] += x_ndim
    if axes[1] < 0:
        axes[1] += y_ndim
    # sanity checks
    if 0 in axes:
        raise ValueError(
            "Cannot perform batch_dot over axis 0. "
            "If your inputs are not batched, "
            "add a dummy batch dimension to your "
            "inputs using keras.ops.expand_dims(x, 0)"
        )
    a0, a1 = axes
    d1 = x_shape[a0]
    d2 = y_shape[a1]
    if d1 is not None and d2 is not None and d1 != d2:
        raise ValueError(
            f"Cannot do batch_dot on inputs with shapes "
            f"{x_shape} and {y_shape} with axes={axes}. "
            f"x.shape[{axes[0]}] != y.shape[{axes[1]}] ({d1} != {d2})."
        )
    # backup ndims. Need them later to undo the rank-2 expansion.
    orig_x_ndim = x_ndim
    orig_y_ndim = y_ndim
    # if rank is 2, expand to 3 so that the matmul below always sees
    # (batch, m, k) x (batch, k, n).
    if x_ndim == 2:
        x = ops.expand_dims(x, 1)
        a0 += 1  # the reduction axis shifted right by the new dim
        x_ndim += 1
    if y_ndim == 2:
        y = ops.expand_dims(y, 2)
        y_ndim += 1
    # bring x's dimension to be reduced to last axis
    # (cyclically shift the axes between a0 and the end).
    if a0 != x_ndim - 1:
        pattern = list(range(x_ndim))
        for i in range(a0, x_ndim - 1):
            pattern[i] = pattern[i + 1]
        pattern[-1] = a0
        x = ops.transpose(x, pattern)
    # bring y's dimension to be reduced to axis 1.
    if a1 != 1:
        pattern = list(range(y_ndim))
        for i in range(a1, 1, -1):
            pattern[i] = pattern[i - 1]
        pattern[1] = a1
        y = ops.transpose(y, pattern)
    # normalize both inputs to rank 3 by flattening the extra dims,
    # remembering them so the result can be reshaped back afterwards.
    if x_ndim > 3:
        # squash middle dimensions of x.
        x_shape = ops.shape(x)
        x_mid_dims = x_shape[1:-1]
        x_squashed_shape = (x_shape[0], -1, x_shape[-1])
        x = ops.reshape(x, x_squashed_shape)
        x_squashed = True
    else:
        x_squashed = False
    if y_ndim > 3:
        # squash trailing dimensions of y.
        y_shape = ops.shape(y)
        y_trail_dims = y_shape[2:]
        y_squashed_shape = (y_shape[0], y_shape[1], -1)
        y = ops.reshape(y, y_squashed_shape)
        y_squashed = True
    else:
        y_squashed = False
    # The actual contraction: batched matmul over the aligned axes.
    result = ops.matmul(x, y)
    # if inputs were squashed, we have to reshape the matmul output.
    output_shape = ops.shape(result)
    do_reshape = False
    if x_squashed:
        output_shape = output_shape[:1] + x_mid_dims + output_shape[-1:]
        do_reshape = True
    if y_squashed:
        output_shape = output_shape[:-1] + y_trail_dims
        do_reshape = True
    if do_reshape:
        result = ops.reshape(result, output_shape)
    # if the inputs were originally rank 2, we remove the added 1 dim.
    if orig_x_ndim == 2:
        result = ops.squeeze(result, 1)
    elif orig_y_ndim == 2:
        result = ops.squeeze(result, -1)
    return result
@keras_export("keras.layers.Dot")
class Dot(Merge):
    """Computes element-wise dot product of two tensors.

    It takes a list of inputs of size 2, and the axes
    corresponding to each input along with the dot product
    is to be performed.

    Let's say `x` and `y` are the two input tensors with shapes
    `(2, 3, 5)` and `(2, 10, 3)`. The batch dimension should be
    of same size for both the inputs, and `axes` should correspond
    to the dimensions that have the same size in the corresponding
    inputs. e.g. with `axes=(1, 2)`, the dot product of `x`, and `y`
    will result in a tensor with shape `(2, 5, 10)`

    Example:

    >>> x = np.arange(10).reshape(1, 5, 2)
    >>> y = np.arange(10, 20).reshape(1, 2, 5)
    >>> keras.layers.Dot(axes=(1, 2))([x, y])

    Usage in a Keras model:

    >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
    >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
    >>> y = keras.layers.Dot(axes=1)([x1, x2])

    Args:
        axes: Integer or tuple of integers, axis or axes along which to
            take the dot product. If a tuple, should be two integers
            corresponding to the desired axis from the first input and the
            desired axis from the second input, respectively. Note that the
            size of the two selected axes must match, and that
            axis `0` (the batch axis) cannot be included.
        normalize: Whether to L2-normalize samples along the dot product axis
            before taking the dot product. If set to `True`, then
            the output of the dot product is the cosine proximity
            between the two samples.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the dot product of the samples from the inputs.
    """

    def __init__(self, axes, normalize=False, **kwargs):
        super().__init__(**kwargs)
        # Validate `axes` eagerly so misconfiguration fails at
        # construction time rather than at first call.
        if not isinstance(axes, int):
            if not isinstance(axes, (list, tuple)):
                raise TypeError(
                    f"Invalid type for argument `axes`: it should be "
                    f"a list or an int. Received: axes={axes}"
                )
            if len(axes) != 2:
                raise ValueError(
                    f"Invalid format for argument `axes`: it should contain "
                    f"two elements. Received: axes={axes}"
                )
            if not isinstance(axes[0], int) or not isinstance(axes[1], int):
                raise ValueError(
                    f"Invalid format for argument `axes`: list elements should "
                    f"be integers. Received: axes={axes}"
                )
        self.axes = axes
        self.normalize = normalize
        self.supports_masking = True
        self._reshape_required = False

    def build(self, input_shape):
        # Used purely for shape validation.
        if (
            not isinstance(input_shape[0], (tuple, list))
            or len(input_shape) != 2
        ):
            raise ValueError(
                f"A `Dot` layer should be called on a list of 2 inputs. "
                f"Received: input_shape={input_shape}"
            )
        shape1 = input_shape[0]
        shape2 = input_shape[1]
        if shape1 is None or shape2 is None:
            return
        # Resolve `axes` to a concrete [axis_for_input1, axis_for_input2]
        # pair, mapping a single negative int per input rank.
        if isinstance(self.axes, int):
            if self.axes < 0:
                axes = [self.axes % len(shape1), self.axes % len(shape2)]
            else:
                axes = [self.axes] * 2
        else:
            axes = self.axes
        # The contracted dimensions must have equal (static) sizes.
        if shape1[axes[0]] != shape2[axes[1]]:
            raise ValueError(
                f"Incompatible input shapes: "
                f"axis values {shape1[axes[0]]} (at axis {axes[0]}) != "
                f"{shape2[axes[1]]} (at axis {axes[1]}). "
                f"Full input shapes: {shape1}, {shape2}"
            )

    def _merge_function(self, inputs):
        if len(inputs) != 2:
            raise ValueError(
                f"A `Dot` layer should be called on exactly 2 inputs. "
                f"Received: inputs={inputs}"
            )
        x1 = inputs[0]
        x2 = inputs[1]
        # Same axis resolution as in `build`, but against the actual
        # tensor ranks (negative axes are normalized per input).
        if isinstance(self.axes, int):
            if self.axes < 0:
                axes = [
                    self.axes % len(x1.shape),
                    self.axes % len(x2.shape),
                ]
            else:
                axes = [self.axes] * 2
        else:
            axes = []
            for i in range(len(self.axes)):
                if self.axes[i] < 0:
                    axes.append(self.axes[i] % len(inputs[i].shape))
                else:
                    axes.append(self.axes[i])
        if self.normalize:
            # L2-normalize along the contraction axes so the dot product
            # becomes a cosine proximity.
            x1 = normalize(x1, axis=axes[0])
            x2 = normalize(x2, axis=axes[1])
        output = batch_dot(x1, x2, axes)
        return output

    def compute_output_shape(self, input_shape):
        if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
            raise ValueError(
                f"A `Dot` layer should be called on a list of 2 inputs. "
                f"Received: input_shape={input_shape}"
            )
        shape1 = list(input_shape[0])
        shape2 = list(input_shape[1])
        if isinstance(self.axes, int):
            if self.axes < 0:
                axes = [self.axes % len(shape1), self.axes % len(shape2)]
            else:
                axes = [self.axes] * 2
        else:
            axes = self.axes
        # Mirror `batch_dot` shape inference: drop the contracted axis of
        # each input and the batch axis of the second, then concatenate.
        shape1.pop(axes[0])
        shape2.pop(axes[1])
        shape2.pop(0)
        output_shape = shape1 + shape2
        if len(output_shape) == 1:
            # batch_dot pads rank-1 results to (batch, 1).
            output_shape += [1]
        return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        # Dot does not propagate masks.
        return None

    def get_config(self):
        config = {
            "axes": self.axes,
            "normalize": self.normalize,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.layers.dot")
def dot(inputs, axes=-1, **kwargs):
    """Functional interface to the `Dot` layer.

    Args:
        inputs: A list of input tensors (at least 2).
        axes: Integer or tuple of integers,
            axis or axes along which to take the dot product.
            Note that axis `0` (the batch axis) cannot be included.
        normalize: Whether to L2-normalize samples along the
            dot product axis before taking the dot product.
            If set to `True`, then the output of the dot product
            is the cosine proximity between the two samples.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the dot product of the samples from the inputs.
    """
    layer = Dot(axes=axes, **kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/add.py | keras/src/layers/merging/add.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Add")
class Add(Merge):
    """Performs elementwise addition operation.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Add()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `added = keras.layers.add([x1, x2])`
    >>> added = keras.layers.Add()([x1, x2])
    >>> out = keras.layers.Dense(4)(added)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """

    def _merge_function(self, inputs):
        # Fold the remaining tensors into the first one, pairwise.
        total = inputs[0]
        for tensor in inputs[1:]:
            total = ops.add(total, tensor)
        return total
@keras_export("keras.layers.add")
def add(inputs, **kwargs):
    """Functional interface to the `keras.layers.Add` layer.

    Args:
        inputs: A list of input tensors with the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the sum of the inputs. It has the same shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.add([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> added = keras.layers.add([x1, x2])
    >>> out = keras.layers.Dense(4)(added)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """
    layer = Add(**kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/minimum.py | keras/src/layers/merging/minimum.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
    """Computes elementwise minimum on a list of inputs.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Minimum()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.minimum([x1, x2])`
    >>> y = keras.layers.Minimum()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """

    def _merge_function(self, inputs):
        # Delegate to the shared Merge helper, which applies the op
        # pairwise and also combines any input masks.
        merge_op = ops.minimum
        return self._apply_merge_op_and_or_mask(merge_op, inputs)
@keras_export("keras.layers.minimum")
def minimum(inputs, **kwargs):
    """Functional interface to the `keras.layers.Minimum` layer.

    Args:
        inputs: A list of input tensors , all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the elementwise product of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.minimum([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.minimum([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """
    layer = Minimum(**kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/maximum.py | keras/src/layers/merging/maximum.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Maximum")
class Maximum(Merge):
    """Computes element-wise maximum on a list of inputs.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Maximum()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.maximum([x1, x2])`
    >>> y = keras.layers.Maximum()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """

    def _merge_function(self, inputs):
        # Delegate to the shared Merge helper, which applies the op
        # pairwise and also combines any input masks.
        merge_op = ops.maximum
        return self._apply_merge_op_and_or_mask(merge_op, inputs)
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
    """Functional interface to the `keras.layers.Maximum` layer.

    Args:
        inputs: A list of input tensors , all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the element-wise product of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.maximum([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.maximum([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """
    layer = Maximum(**kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/merging_test.py | keras/src/layers/merging/merging_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
def np_dot(a, b, axes):
    """Numpy reference for the `Dot` layer: per-sample tensordot.

    `axes` is an int or a pair of ints indexing axes of the full batched
    tensors; positive axes are shifted down by one because each sample
    (with the batch axis removed) is contracted individually. Rank-1
    results are padded to `(batch, 1)` to mirror `batch_dot`.
    """
    if isinstance(axes, int):
        axes = (axes, axes)
    # Per-sample axes: negative indices are unchanged, positive ones
    # lose the batch dimension.
    sample_axes = []
    for axis in axes:
        sample_axes.append(axis if axis < 0 else axis - 1)
    per_sample = [
        np.tensordot(sample_a, sample_b, sample_axes)
        for sample_a, sample_b in zip(a, b)
    ]
    res = np.stack(per_sample)
    if res.ndim == 1:
        res = res[:, None]
    return res
# Shared parameterization for the merging-layer tests. Each entry maps a
# merge layer class to a numpy reference op (`np_op`) used to compute
# expected outputs; optional keys override the defaults:
#   init_kwargs: constructor kwargs for the layer (e.g. Concatenate axis)
#   input_shape / expected_output_shape: shapes when not (2, 4, 5)
#   skip_mask_test: set for Dot, whose compute_mask always returns None
TEST_PARAMETERS = [
    {
        "testcase_name": "add",
        "layer_class": layers.Add,
        "np_op": np.add,
    },
    {
        "testcase_name": "subtract",
        "layer_class": layers.Subtract,
        "np_op": np.subtract,
    },
    {
        "testcase_name": "minimum",
        "layer_class": layers.Minimum,
        "np_op": np.minimum,
    },
    {
        "testcase_name": "maximum",
        "layer_class": layers.Maximum,
        "np_op": np.maximum,
    },
    {
        "testcase_name": "multiply",
        "layer_class": layers.Multiply,
        "np_op": np.multiply,
    },
    {
        "testcase_name": "average",
        "layer_class": layers.Average,
        "np_op": lambda a, b: np.multiply(np.add(a, b), 0.5),
    },
    {
        "testcase_name": "concat",
        "layer_class": layers.Concatenate,
        "np_op": lambda a, b, **kwargs: np.concatenate((a, b), **kwargs),
        "init_kwargs": {"axis": -1},
        "expected_output_shape": (2, 4, 10),
    },
    {
        "testcase_name": "dot_2d",
        "layer_class": layers.Dot,
        "np_op": np_dot,
        "init_kwargs": {"axes": -1},
        "input_shape": (2, 4),
        "expected_output_shape": (2, 1),
        "skip_mask_test": True,
    },
    {
        "testcase_name": "dot_3d",
        "layer_class": layers.Dot,
        "np_op": np_dot,
        "init_kwargs": {"axes": -1},
        "expected_output_shape": (2, 4, 4),
        "skip_mask_test": True,
    },
]
@pytest.mark.requires_trainable_backend
class MergingLayersTest(testing.TestCase):
@parameterized.named_parameters(TEST_PARAMETERS)
def test_basic(
    self,
    layer_class,
    init_kwargs={},
    input_shape=(2, 4, 5),
    expected_output_shape=(2, 4, 5),
    **kwargs,
):
    """Smoke-test each merge layer through the shared layer harness.

    NOTE(review): the mutable `{}` default is safe only as long as
    `run_layer_test` never mutates `init_kwargs` — assumed, confirm.
    """
    self.run_layer_test(
        layer_class,
        init_kwargs=init_kwargs,
        # Merge layers take a list of inputs; feed two identical shapes.
        input_shape=(input_shape, input_shape),
        expected_output_shape=expected_output_shape,
        expected_num_trainable_weights=0,
        expected_num_non_trainable_weights=0,
        expected_num_seed_generators=0,
        expected_num_losses=0,
        supports_masking=True,
    )
@parameterized.named_parameters(TEST_PARAMETERS)
def test_correctness_static(
    self,
    layer_class,
    np_op,
    init_kwargs={},
    input_shape=(2, 4, 5),
    expected_output_shape=(2, 4, 5),
    skip_mask_test=False,
):
    """Each merge layer matches its numpy reference with static shapes.

    Builds a functional model whose Inputs carry a fixed batch size,
    compares the model output against `np_op`, and checks mask
    propagation through `compute_mask`.
    """
    batch_size = input_shape[0]
    shape = input_shape[1:]
    x1 = np.random.rand(*input_shape)
    x2 = np.random.rand(*input_shape)
    # Reference result computed with plain numpy.
    x3 = np_op(x1, x2, **init_kwargs)
    input_1 = layers.Input(shape=shape, batch_size=batch_size)
    input_2 = layers.Input(shape=shape, batch_size=batch_size)
    layer = layer_class(**init_kwargs)
    out = layer([input_1, input_2])
    model = models.Model([input_1, input_2], out)
    res = model([x1, x2])
    self.assertEqual(res.shape, expected_output_shape)
    self.assertAllClose(res, x3, atol=1e-4, tpu_atol=1e-2, tpu_rtol=1e-2)
    # No input masks -> no output mask, both symbolically and eagerly.
    self.assertIsNone(layer.compute_mask([input_1, input_2], [None, None]))
    self.assertIsNone(layer.compute_mask([x1, x2], [None, None]))
    if not skip_mask_test:
        # All-True input masks must produce an all-True output mask.
        mask1 = np.ones(input_shape[:-1], dtype=np.bool_)
        mask2 = np.ones(input_shape[:-1], dtype=np.bool_)
        self.assertTrue(
            np.all(
                backend.convert_to_numpy(
                    layer.compute_mask([x1, x2], [mask1, mask2])
                )
            )
        )
@parameterized.named_parameters(TEST_PARAMETERS)
def test_correctness_dynamic(
    self,
    layer_class,
    np_op,
    init_kwargs={},
    input_shape=(2, 4, 5),
    expected_output_shape=(2, 4, 5),
    skip_mask_test=False,
):
    """Each merge layer matches its numpy reference with dynamic batch.

    Same as `test_correctness_static` but the Inputs have no fixed
    batch size, and mask propagation is checked with Variable masks.
    """
    shape = input_shape[1:]
    x1 = np.random.rand(*input_shape)
    x2 = np.random.rand(*input_shape)
    # Reference result computed with plain numpy.
    x3 = np_op(x1, x2, **init_kwargs)
    input_1 = layers.Input(shape=shape)
    input_2 = layers.Input(shape=shape)
    layer = layer_class(**init_kwargs)
    out = layer([input_1, input_2])
    model = models.Model([input_1, input_2], out)
    res = model([x1, x2])
    self.assertEqual(res.shape, expected_output_shape)
    self.assertAllClose(res, x3, atol=1e-4, tpu_atol=1e-2, tpu_rtol=1e-2)
    self.assertIsNone(layer.compute_mask([input_1, input_2], [None, None]))
    if not skip_mask_test:
        # Non-None (Variable) masks on symbolic inputs must propagate
        # as an all-True mask here (inputs are fully unmasked data).
        self.assertTrue(
            np.all(
                backend.convert_to_numpy(
                    layer.compute_mask(
                        [input_1, input_2],
                        [backend.Variable(x1), backend.Variable(x2)],
                    )
                )
            )
        )
@parameterized.named_parameters(TEST_PARAMETERS)
def test_errors(
    self,
    layer_class,
    init_kwargs={},
    input_shape=(2, 4, 5),
    skip_mask_test=False,
    **kwargs,
):
    """`compute_mask` validates the types and lengths of its arguments."""
    if skip_mask_test:
        pytest.skip("Masking not supported")
    batch_size = input_shape[0]
    shape = input_shape[1:]
    # A bare ndarray used as an (invalid) non-list `mask` below.
    # The original assigned `x1` twice with equivalent expressions
    # (`np.random.rand(*input_shape)` then overwritten by
    # `np.random.rand(batch_size, *shape)`); the dead first
    # assignment was removed.
    x1 = np.random.rand(batch_size, *shape)
    input_1 = layers.Input(shape=shape, batch_size=batch_size)
    input_2 = layers.Input(shape=shape, batch_size=batch_size)
    layer = layer_class(**init_kwargs)
    with self.assertRaisesRegex(ValueError, "`mask` should be a list."):
        layer.compute_mask([input_1, input_2], x1)
    with self.assertRaisesRegex(ValueError, "`inputs` should be a list."):
        layer.compute_mask(input_1, [None, None])
    with self.assertRaisesRegex(
        ValueError, " should have the same length."
    ):
        layer.compute_mask([input_1, input_2], [None])
def test_subtract_layer_inputs_length_errors(self):
    """`Subtract` must reject input lists whose length is not 2."""
    shape = (4, 5)
    inputs = [layers.Input(shape=shape) for _ in range(3)]
    expected_msg = "layer should be called on exactly 2 inputs"
    # Three inputs: too many.
    with self.assertRaisesRegex(ValueError, expected_msg):
        layers.Subtract()(inputs)
    # One input: too few.
    with self.assertRaisesRegex(ValueError, expected_msg):
        layers.Subtract()(inputs[:1])
def test_dot_higher_dim(self):
    """Dot handles inputs of different ranks with negative axes."""
    a_shape = (1, 3, 2)
    b_shape = (1, 1, 2, 3)
    # Test symbolic call
    a = layers.Input(batch_shape=a_shape)
    b = layers.Input(batch_shape=b_shape)
    c = layers.Dot(axes=(-2, -1))([a, b])
    self.assertEqual(c.shape, (1, 2, 1, 2))
    # Eager call must produce the same shape as the symbolic one.
    a = np.random.random(a_shape)
    b = np.random.random(b_shape)
    c = layers.Dot(axes=(-2, -1))([a, b])
    self.assertEqual(backend.standardize_shape(c.shape), (1, 2, 1, 2))
def test_add_with_mask(self):
    """Add propagates a mask only when every input is masked.

    With one masked input the output carries no mask; with both masked,
    the output mask is the OR of the input masks (positions where at
    least one input is valid).
    """
    mask = layers.Masking()
    x1 = mask(backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]]))
    x2 = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Only x1 masked: values add up, but no mask is attached.
    output = layers.Add()([x1, x2])
    self.assertAllClose(output, [[[0, 0], [1, 2], [1, 2], [6, 8]]])
    self.assertIsNone(getattr(output, "_keras_mask", None))
    # Both masked: the combined mask is attached to the output.
    x2 = mask(x2)
    output = layers.Add()([x1, x2])
    self.assertAllClose(output, [[[0, 0], [1, 2], [1, 2], [6, 8]]])
    self.assertAllClose(output._keras_mask, [[0, 1, 1, 1]])
def test_subtract_with_mask(self):
    """`Subtract` drops the mask with one masked input, ORs both otherwise."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Only one input masked: no mask on the output.
    result = layers.Subtract()([masked, plain])
    self.assertAllClose(result, [[[0, 0], [1, 2], [-1, -2], [0, 0]]])
    self.assertIsNone(getattr(result, "_keras_mask", None))
    # Both inputs masked: output carries the OR of the masks.
    result = layers.Subtract()([masked, masking(plain)])
    self.assertAllClose(result, [[[0, 0], [1, 2], [-1, -2], [0, 0]]])
    self.assertAllClose(result._keras_mask, [[0, 1, 1, 1]])
def test_average_with_mask(self):
    """`Average` drops the mask with one masked input, ORs both otherwise."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Only one input masked: no mask on the output.
    result = layers.Average()([masked, plain])
    self.assertAllClose(result, [[[0, 0], [0.5, 1], [0.5, 1], [3, 4]]])
    self.assertIsNone(getattr(result, "_keras_mask", None))
    # Both inputs masked: output carries the OR of the masks.
    result = layers.Average()([masked, masking(plain)])
    self.assertAllClose(result, [[[0, 0], [0.5, 1], [0.5, 1], [3, 4]]])
    self.assertAllClose(result._keras_mask, [[0, 1, 1, 1]])
def test_multiply_with_mask(self):
    """`Multiply` treats masked positions as identity when both are masked."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Only one input masked: a plain product, no mask on the output.
    result = layers.Multiply()([masked, plain])
    self.assertAllClose(result, [[[0, 0], [0, 0], [1, 2], [9, 16]]])
    self.assertIsNone(getattr(result, "_keras_mask", None))
    # Both inputs masked: masked-out factors do not zero the product, and
    # the output carries the OR of the masks.
    result = layers.Multiply()([masked, masking(plain)])
    self.assertAllClose(result, [[[0, 0], [1, 2], [1, 2], [9, 16]]])
    self.assertAllClose(result._keras_mask, [[0, 1, 1, 1]])
def test_maximum_with_mask(self):
    """`Maximum` ignores masked-out values when both inputs are masked."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [-1, -2], [0, 0], [-3, -4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [-1, -2], [-3, -4]]])
    # Only one input masked: no mask on the output.
    result = layers.Maximum()([masked, plain])
    self.assertAllClose(result, [[[0, 0], [0, 0], [-1, -2], [-3, -4]]])
    self.assertIsNone(getattr(result, "_keras_mask", None))
    # Both inputs masked: masked values don't win the max; masks are OR-ed.
    result = layers.Maximum()([masked, masking(plain)])
    self.assertAllClose(result, [[[0, 0], [-1, -2], [-1, -2], [-3, -4]]])
    self.assertAllClose(result._keras_mask, [[0, 1, 1, 1]])
def test_minimum_with_mask(self):
    """`Minimum` ignores masked-out values when both inputs are masked."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Only one input masked: no mask on the output.
    result = layers.Minimum()([masked, plain])
    self.assertAllClose(result, [[[0, 0], [0, 0], [1, 2], [3, 4]]])
    self.assertIsNone(getattr(result, "_keras_mask", None))
    # Both inputs masked: masked values don't win the min; masks are OR-ed.
    result = layers.Minimum()([masked, masking(plain)])
    self.assertAllClose(result, [[[0, 0], [1, 2], [1, 2], [3, 4]]])
    self.assertAllClose(result._keras_mask, [[0, 1, 1, 1]])
def test_concatenate_with_mask(self):
    """`Concatenate` mask handling along the masked axis vs. another axis."""
    masking = layers.Masking()
    masked = masking(
        backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    )
    plain = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    # Concatenating along the timestep axis concatenates the masks too
    # (the unmasked input contributes an all-ones mask).
    result = layers.Concatenate(axis=1)([masked, plain])
    self.assertAllClose(
        result,
        [[[0, 0], [1, 2], [0, 0], [3, 4], [0, 0], [0, 0], [1, 2], [3, 4]]],
    )
    self.assertAllClose(result._keras_mask, [[0, 1, 0, 1, 1, 1, 1, 1]])
    # Concatenating along the feature axis combines masks per timestep.
    result = layers.Concatenate(axis=2)([masked, plain])
    self.assertAllClose(
        result,
        [[[0, 0, 0, 0], [1, 2, 0, 0], [0, 0, 1, 2], [3, 4, 3, 4]]],
    )
    self.assertAllClose(result._keras_mask, [[1, 1, 1, 1]])
def test_concatenate_with_mask_symbolic(self):
    """The concatenated mask is also computable in a symbolic graph."""
    in_a = layers.Input((4, 2))
    in_b = layers.Input((4, 2))
    concat = layers.Concatenate(axis=1)([layers.Masking()(in_a), in_b])
    # Build a model whose output IS the propagated mask.
    mask_model = models.Model(
        inputs=[in_a, in_b], outputs=concat._keras_mask
    )
    a = backend.convert_to_tensor([[[0, 0], [1, 2], [0, 0], [3, 4]]])
    b = backend.convert_to_tensor([[[0, 0], [0, 0], [1, 2], [3, 4]]])
    self.assertAllClose(mask_model([a, b]), [[0, 1, 0, 1, 1, 1, 1, 1]])
def test_concatenate_errors(self):
    """Shapes must match on every axis except the concatenation axis."""
    error_msg = (
        "requires inputs with matching shapes "
        "except for the concatenation axis"
    )
    # Differing only on the concat axis: accepted.
    out = layers.Concatenate(axis=-1)(
        [np.ones((1, 1, 1, 1, 5)), np.ones((1, 1, 1, 1, 4))]
    )
    self.assertEqual(ops.shape(out), (1, 1, 1, 1, 9))
    # Mismatch on a non-concat axis: rejected.
    with self.assertRaisesRegex(ValueError, error_msg):
        layers.Concatenate(axis=-1)(
            [np.ones((1, 2, 1, 1, 5)), np.ones((1, 1, 1, 1, 4))]
        )
    with self.assertRaisesRegex(ValueError, error_msg):
        layers.Concatenate(axis=1)(
            [np.ones((1, 2, 1, 2, 1)), np.ones((1, 1, 1, 3, 1))]
        )
@parameterized.named_parameters(TEST_PARAMETERS)
@pytest.mark.skipif(
    not backend.SUPPORTS_SPARSE_TENSORS,
    reason="Backend does not support sparse tensors.",
)
def test_sparse(
    self,
    layer_class,
    np_op,
    init_kwargs={},  # NOTE(review): mutable default; harmless as it is never mutated here.
    input_shape=(2, 4, 5),
    expected_output_shape=(2, 4, 5),
    **kwargs,
):
    """Merge layers with sparse inputs: sparse+sparse stays sparse,
    sparse+dense densifies, and values match the numpy reference `np_op`.
    """
    self.run_layer_test(
        layer_class,
        init_kwargs=init_kwargs,
        input_shape=[input_shape, input_shape],
        input_sparse=True,
        expected_output_shape=expected_output_shape,
        expected_output_sparse=True,
        expected_num_trainable_weights=0,
        expected_num_non_trainable_weights=0,
        expected_num_seed_generators=0,
        expected_num_losses=0,
        supports_masking=True,
        run_training_check=False,
        run_mixed_precision_check=False,
    )
    layer = layer_class(**init_kwargs)

    # Merging a sparse tensor with a dense tensor, or a dense tensor with a
    # sparse tensor produces a dense tensor
    if backend.backend() == "tensorflow":
        import tensorflow as tf

        x1 = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3))
        x3 = tf.SparseTensor([[0, 0], [1, 1]], [4.0, 5.0], (2, 3))
    elif backend.backend() == "jax":
        import jax.experimental.sparse as jax_sparse

        # Use n_batch of 1 to be compatible with all ops.
        x1 = jax_sparse.BCOO(([[1.0, 2.0]], [[[0], [2]]]), shape=(2, 3))
        x3 = jax_sparse.BCOO(([[4.0, 5.0]], [[[0], [1]]]), shape=(2, 3))
    else:
        self.fail(f"Sparse is unsupported with backend {backend.backend()}")

    x1_np = backend.convert_to_numpy(x1)
    x2 = np.random.rand(2, 3)
    # Dense result must match the numpy reference in both argument orders.
    self.assertAllClose(layer([x1, x2]), np_op(x1_np, x2, **init_kwargs))
    self.assertAllClose(layer([x2, x1]), np_op(x2, x1_np, **init_kwargs))

    # Merging a sparse tensor with a sparse tensor produces a sparse tensor
    x3_np = backend.convert_to_numpy(x3)
    self.assertSparse(layer([x1, x3]))
    self.assertAllClose(layer([x1, x3]), np_op(x1_np, x3_np, **init_kwargs))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/average.py | keras/src/layers/merging/average.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Average")
class Average(Merge):
    """Averages a list of inputs element-wise.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Average()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.average([x1, x2])`
    >>> y = keras.layers.Average()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        # Sum all inputs, then divide once by the count to get the mean.
        output = inputs[0]
        for i in range(1, len(inputs)):
            output = ops.add(output, inputs[i])
        return output / len(inputs)
@keras_export("keras.layers.average")
def average(inputs, **kwargs):
    """Functional interface to the `keras.layers.Average` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the element-wise average of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.average([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.average([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    return Average(**kwargs)(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/base_merge.py | keras/src/layers/merging/base_merge.py | from keras.src import backend
from keras.src import ops
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
class Merge(Layer):
    """Generic merge layer for elementwise merge functions.

    Used to implement `Sum`, `Average`, etc.

    Args:
        **kwargs: standard layer keyword arguments.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All merge layers know how to propagate masks (see `compute_mask`).
        self.supports_masking = True

    def _merge_function(self, inputs):
        # Subclasses override this with the actual elementwise reduction.
        raise NotImplementedError

    def _apply_merge_op_and_or_mask(self, op_fn, inputs):
        """Merge a set of inputs by applying `op_fn` and ORing the masks.

        We use this for `Minimum` and `Maximum` as it handles the fact that
        there is no identity element. If applicable, the mask obtained by
        ORing all masks is set on the output.

        Args:
            op_fn: binary operation to apply to tensor pair.
            inputs: array of tensors to apply operation on.
        """
        output = None
        output_mask = None
        for x in inputs:
            mask = backend.get_keras_mask(x)
            if mask is not None:
                # Broadcast the (batch, time) mask over the feature axis so
                # it can be used with `ops.where` below.
                mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))
            if output is None:
                output = x
                output_mask = mask
                continue
            if mask is not None:
                # Outside x's mask, substitute the running output so masked
                # values cannot influence the result of `op_fn`.
                x = ops.where(mask, x, output)
            if output_mask is not None:
                output = ops.where(output_mask, output, x)
            if mask is not None and output_mask is not None:
                output_mask = ops.logical_or(output_mask, mask)
            else:
                # Any unmasked input means no mask on the output.
                output_mask = None
            output = op_fn(output, x)
        if output_mask is not None:
            # Collapse the broadcast feature axis back to (batch, time).
            output_mask = ops.any(output_mask, axis=-1, keepdims=False)
            backend.set_keras_mask(output, output_mask)
        return output

    def _compute_elemwise_op_output_shape(self, shape1, shape2):
        """Computes the shape of the resultant of an elementwise operation.

        Args:
            shape1: Tuple or None. Shape of the first tensor
            shape2: Tuple or None. Shape of the second tensor

        Returns:
            Expected output shape when an element-wise operation is
            carried out on 2 tensors with shapes shape1 and shape2.
            tuple or None.

        Raises:
            ValueError: If shape1 and shape2 are not compatible for
                element-wise operations.
        """
        if None in [shape1, shape2]:
            return None
        elif len(shape1) < len(shape2):
            # Normalize so shape1 is always the longer (or equal) shape.
            return self._compute_elemwise_op_output_shape(shape2, shape1)
        elif not shape2:
            return shape1
        output_shape = list(shape1[: -len(shape2)])
        # Broadcast trailing dimensions pairwise, numpy-style.
        for i, j in zip(shape1[-len(shape2) :], shape2):
            if i is None or j is None:
                output_shape.append(None)
            elif i == 1:
                output_shape.append(j)
            elif j == 1:
                output_shape.append(i)
            else:
                if i != j:
                    raise ValueError(
                        "Inputs have incompatible shapes. "
                        f"Received shapes {shape1} and {shape2}"
                    )
                output_shape.append(i)
        return tuple(output_shape)

    def build(self, input_shape):
        # Used purely for shape validation.
        if not isinstance(input_shape[0], (tuple, list)):
            raise ValueError(
                "A merge layer should be called on a list of inputs. "
                f"Received: input_shape={input_shape} (not a list of shapes)"
            )
        if len(input_shape) < 1:
            raise ValueError(
                "A merge layer should be called "
                "on a list of at least 1 input. "
                f"Received {len(input_shape)} inputs. "
                f"Full input_shape received: {input_shape}"
            )
        batch_sizes = {s[0] for s in input_shape if s} - {None}
        if len(batch_sizes) > 1:
            raise ValueError(
                "Cannot merge tensors with different batch sizes. "
                f"Received tensors with shapes {input_shape}"
            )
        if input_shape[0] is None:
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        for i in range(1, len(input_shape)):
            if input_shape[i] is None:
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(
                output_shape, shape
            )
        # If the inputs have different ranks, we have to reshape them
        # to make them broadcastable.
        if None not in input_shape and len(set(map(len, input_shape))) == 1:
            self._reshape_required = False
        else:
            self._reshape_required = True

    def call(self, inputs):
        if not isinstance(inputs, (list, tuple)):
            raise ValueError(
                "A merge layer should be called on a list of inputs. "
                f"Received: inputs={inputs} (not a list of tensors)"
            )
        if self._reshape_required:
            reshaped_inputs = []
            input_ndims = list(map(ops.ndim, inputs))
            if None not in input_ndims:
                # If ranks of all inputs are available,
                # we simply expand each of them at axis=1
                # until all of them have the same rank.
                max_ndim = max(input_ndims)
                for x in inputs:
                    x_ndim = ops.ndim(x)
                    for _ in range(max_ndim - x_ndim):
                        x = ops.expand_dims(x, axis=1)
                    reshaped_inputs.append(x)
                return self._merge_function(reshaped_inputs)
            else:
                # Transpose all inputs so that batch size is the last
                # dimension.
                # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... ,
                # batch_size)
                transposed = False
                for x in inputs:
                    x_ndim = ops.ndim(x)
                    if x_ndim is None:
                        x_shape = ops.shape(x)
                        batch_size = x_shape[0]
                        # NOTE(review): `backend.concatenate` is inconsistent
                        # with the `ops.concatenate` call used for the output
                        # below -- confirm `backend` exposes `concatenate` on
                        # this (dynamic-rank) path.
                        new_shape = backend.concatenate(
                            [x_shape[1:], ops.expand_dims(batch_size, axis=-1)]
                        )
                        x_transposed = ops.reshape(
                            x,
                            ops.stack(
                                [batch_size, ops.prod(x_shape[1:])],
                                axis=0,
                            ),
                        )
                        x_transposed = ops.transpose(x_transposed, perm=(1, 0))
                        x_transposed = ops.reshape(x_transposed, new_shape)
                        reshaped_inputs.append(x_transposed)
                        transposed = True
                    elif x_ndim > 1:
                        dims = list(range(1, x_ndim)) + [0]
                        reshaped_inputs.append(ops.transpose(x, perm=dims))
                        # Fix: removed stray debug `print(dims)` left here.
                        transposed = True
                    else:
                        # We don't transpose inputs if they are 1D vectors or
                        # scalars.
                        reshaped_inputs.append(x)
                y = self._merge_function(reshaped_inputs)
                y_ndim = ops.ndim(y)
                if transposed:
                    # If inputs have been transposed, we have to transpose the
                    # output too.
                    if y_ndim is None:
                        y_shape = ops.shape(y)
                        y_ndim = ops.shape(y_shape)[0]
                        batch_size = y_shape[y_ndim - 1]
                        new_shape = ops.concatenate(
                            [
                                ops.expand_dims(batch_size, axis=-1),
                                y_shape[: y_ndim - 1],
                            ]
                        )
                        y = ops.reshape(y, (-1, batch_size))
                        y = ops.transpose(y, perm=(1, 0))
                        y = ops.reshape(y, new_shape)
                    elif y_ndim > 1:
                        dims = [y_ndim - 1] + list(range(y_ndim - 1))
                        y = ops.transpose(y, perm=dims)
                return y
        else:
            return self._merge_function(inputs)

    def compute_output_shape(self, input_shape):
        if input_shape[0] is None:
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        for i in range(1, len(input_shape)):
            if input_shape[i] is None:
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(
                output_shape, shape
            )
        batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
        if len(batch_sizes) == 1:
            output_shape = (list(batch_sizes)[0],) + output_shape
        else:
            output_shape = (None,) + output_shape
        return output_shape

    def compute_output_spec(self, inputs):
        output_shape = self.compute_output_shape([x.shape for x in inputs])
        # The output is sparse only if every input is sparse.
        output_sparse = all(x.sparse for x in inputs)
        return KerasTensor(
            output_shape, dtype=self.compute_dtype, sparse=output_sparse
        )

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        if not isinstance(mask, (tuple, list)):
            raise ValueError(f"`mask` should be a list. Received: mask={mask}")
        if not isinstance(inputs, (tuple, list)):
            raise ValueError(
                f"`inputs` should be a list. Received: inputs={inputs}"
            )
        if len(mask) != len(inputs):
            raise ValueError(
                "The lists `inputs` and `mask` should have the same length. "
                f"Received: inputs={inputs} of length {len(inputs)}, and "
                f"mask={mask} of length {len(mask)}"
            )
        # Default implementation does an OR between the masks, which works
        # for `Add`, `Subtract`, `Average`, `Maximum`, `Minimum`, `Multiply`.
        if any(m is None for m in mask):
            return None
        output_mask = mask[0]
        for m in mask[1:]:
            output_mask = ops.logical_or(output_mask, m)
        return output_mask

    def get_config(self):
        return super().get_config()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/__init__.py | keras/src/layers/merging/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/multiply.py | keras/src/layers/merging/multiply.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Multiply")
class Multiply(Merge):
    """Performs elementwise multiplication.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Multiply()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.multiply([x1, x2])`
    >>> y = keras.layers.Multiply()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        # Mask-aware elementwise product: masked-out positions are replaced
        # by the multiplicative identity (1) while accumulating, then the
        # final output is zeroed outside the combined mask.
        masks = [backend.get_keras_mask(x) for x in inputs]
        # Only propagate a mask if *every* input carries one.
        has_output_mask = all(mask is not None for mask in masks)
        output = None
        output_mask = None
        for x, mask in zip(inputs, masks):
            if mask is not None:
                # Broadcast the (batch, time) mask over the feature axis.
                mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))
                # Replace 0s with 1s outside of mask.
                x = ops.where(mask, x, ops.cast(1, x.dtype))
            if has_output_mask:
                output_mask = (
                    mask
                    if output_mask is None
                    else ops.logical_or(output_mask, mask)
                )
            output = x if output is None else ops.multiply(output, x)
        if has_output_mask:
            # Replace 1s with 0s outside of mask per standard masking rules.
            output = ops.where(output_mask, output, ops.cast(0, output.dtype))
            output_mask = ops.any(output_mask, axis=-1, keepdims=False)
            backend.set_keras_mask(output, output_mask)
        return output
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
    """Functional interface to the `keras.layers.Multiply` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments, forwarded to `Multiply`.

    Returns:
        A tensor holding the elementwise product of the inputs, with the
        same shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.multiply([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.multiply([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    # Instantiate the layer, then apply it to the inputs.
    layer = Multiply(**kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/merging/subtract.py | keras/src/layers/merging/subtract.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Subtract")
class Subtract(Merge):
    """Performs elementwise subtraction.

    Expects a list of exactly two tensors of the same shape and returns a
    single tensor of that shape holding `inputs[0] - inputs[1]`.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Subtract()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])`
    >>> subtracted = keras.layers.Subtract()([x1, x2])
    >>> out = keras.layers.Dense(4)(subtracted)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def build(self, input_shape):
        # Let the base class validate shape compatibility first, then
        # enforce the two-input constraint specific to subtraction.
        super().build(input_shape)
        if len(input_shape) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: input_shape={input_shape}"
            )

    def _merge_function(self, inputs):
        if len(inputs) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: inputs={inputs}"
            )
        minuend, subtrahend = inputs
        return ops.subtract(minuend, subtrahend)
@keras_export("keras.layers.subtract")
def subtract(inputs, **kwargs):
    """Functional interface to the `keras.layers.Subtract` layer.

    Args:
        inputs: A list of exactly two input tensors of the same shape.
        **kwargs: Standard layer keyword arguments, forwarded to `Subtract`.

    Returns:
        A tensor holding `inputs[0] - inputs[1]`, with the same shape as
        the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.subtract([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> subtracted = keras.layers.subtract([x1, x2])
    >>> out = keras.layers.Dense(4)(subtracted)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    # Instantiate the layer, then apply it to the inputs.
    layer = Subtract(**kwargs)
    return layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding2d_test.py | keras/src/layers/reshaping/zero_padding2d_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase):
    """Tests for `ZeroPadding2D` across both data formats."""

    @parameterized.parameters(
        {"data_format": "channels_first"},
        {"data_format": "channels_last"},
    )
    def test_zero_padding_2d(self, data_format):
        # Asymmetric padding: (top=1, bottom=2) rows, (left=3, right=4) cols.
        inputs = np.random.rand(1, 2, 3, 4)
        outputs = layers.ZeroPadding2D(
            padding=((1, 2), (3, 4)), data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            # Every padded row/column must be all zeros...
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, :, index], 0.0)
            # ...and the interior must equal the original input.
            self.assertAllClose(outputs[:, :, 1:-2, 3:-4], inputs)
        else:
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 1:-2, 3:-4, :], inputs)

    @parameterized.product(
        (
            {"padding": ((2, 2), (2, 2))},  # 2 tuples
            {"padding": (2, 2)},  # 1 tuple
            {"padding": 2},  # 1 int
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_zero_padding_2d_with_same_padding(self, padding, data_format):
        # The three `padding` spellings above are all equivalent to
        # symmetric padding of 2 on both spatial dims.
        inputs = np.random.rand(1, 2, 3, 4)
        outputs = layers.ZeroPadding2D(
            padding=padding, data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
                self.assertAllClose(outputs[:, :, :, index], 0.0)
            self.assertAllClose(outputs[:, :, 2:-2, 2:-2], inputs)
        else:
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :], 0.0)
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 2:-2, 2:-2, :], inputs)

    def test_zero_padding_2d_with_dynamic_spatial_dim(self):
        # An unknown spatial dim stays unknown; the known one grows by the
        # total padding (2 + 1 + 2 = 5).
        if backend.config.image_data_format() == "channels_last":
            input_layer = layers.Input(batch_shape=(1, 2, None, 4))
        else:
            input_layer = layers.Input(batch_shape=(1, 4, 2, None))
        padded = layers.ZeroPadding2D(((1, 2), (3, 4)))(input_layer)
        if backend.config.image_data_format() == "channels_last":
            self.assertEqual(padded.shape, (1, 5, None, 4))
        else:
            self.assertEqual(padded.shape, (1, 4, 5, None))

    @parameterized.parameters(
        {"padding": (1,)},
        {"padding": (1, 2, 3)},
        {"padding": "1"},
        {"padding": ((1, 2), (3, 4, 5))},
        {"padding": ((1, 2), (3, -4))},
        {"padding": ((1, 2), "3")},
    )
    def test_zero_padding_2d_errors_if_padding_argument_invalid(self, padding):
        with self.assertRaises(ValueError):
            layers.ZeroPadding2D(padding)

    @parameterized.parameters(
        {"data_format": "channels_first"},
        {"data_format": "channels_last"},
    )
    def test_zero_padding_2d_get_config(self, data_format):
        # `padding=(1, 2)` is normalized to per-side tuples in the config.
        layer = layers.ZeroPadding2D(padding=(1, 2), data_format=data_format)
        expected_config = {
            "data_format": data_format,
            "dtype": dtype_policies.serialize(layer.dtype_policy),
            "name": layer.name,
            "padding": ((1, 1), (2, 2)),
            "trainable": layer.trainable,
        }
        self.assertEqual(layer.get_config(), expected_config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping3d_test.py | keras/src/layers/reshaping/cropping3d_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class Cropping3DTest(testing.TestCase):
    """Tests for `Cropping3D` across both data formats."""

    @parameterized.product(
        (
            {"dim1_cropping": (1, 2), "dim1_expected": (1, 5)},  # both
            {"dim1_cropping": (0, 2), "dim1_expected": (0, 5)},  # left only
            {"dim1_cropping": (1, 0), "dim1_expected": (1, 7)},  # right only
        ),
        (
            {"dim2_cropping": (3, 4), "dim2_expected": (3, 5)},  # both
            {"dim2_cropping": (0, 4), "dim2_expected": (0, 5)},  # left only
            {"dim2_cropping": (3, 0), "dim2_expected": (3, 9)},  # right only
        ),
        (
            {"dim3_cropping": (5, 6), "dim3_expected": (5, 7)},  # both
            {"dim3_cropping": (0, 6), "dim3_expected": (0, 7)},  # left only
            {"dim3_cropping": (5, 0), "dim3_expected": (5, 13)},  # right only
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    @pytest.mark.requires_trainable_backend
    def test_cropping_3d(
        self,
        dim1_cropping,
        dim2_cropping,
        dim3_cropping,
        data_format,
        dim1_expected,
        dim2_expected,
        dim3_expected,
    ):
        # The expected output is the numpy slice of the input along each
        # spatial dimension (`*_expected` are precomputed slice bounds).
        if data_format == "channels_first":
            inputs = np.random.rand(3, 5, 7, 9, 13)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    :,
                    dim1_expected[0] : dim1_expected[1],
                    dim2_expected[0] : dim2_expected[1],
                    dim3_expected[0] : dim3_expected[1],
                ]
            )
        else:
            inputs = np.random.rand(3, 7, 9, 13, 5)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    dim1_expected[0] : dim1_expected[1],
                    dim2_expected[0] : dim2_expected[1],
                    dim3_expected[0] : dim3_expected[1],
                    :,
                ]
            )
        cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
        self.run_layer_test(
            layers.Cropping3D,
            init_kwargs={"cropping": cropping, "data_format": data_format},
            input_data=inputs,
            expected_output=expected_output,
        )

    @parameterized.product(
        (
            # same cropping values with 3 tuples
            {
                "cropping": ((2, 2), (2, 2), (2, 2)),
                "expected": ((2, 5), (2, 7), (2, 11)),
            },
            # same cropping values with 1 tuple
            {"cropping": (2, 2, 2), "expected": ((2, 5), (2, 7), (2, 11))},
            # same cropping values with an integer
            {"cropping": 2, "expected": ((2, 5), (2, 7), (2, 11))},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    @pytest.mark.requires_trainable_backend
    def test_cropping_3d_with_same_cropping(
        self, cropping, data_format, expected
    ):
        # All three `cropping` spellings must behave identically.
        if data_format == "channels_first":
            inputs = np.random.rand(3, 5, 7, 9, 13)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    :,
                    expected[0][0] : expected[0][1],
                    expected[1][0] : expected[1][1],
                    expected[2][0] : expected[2][1],
                ]
            )
        else:
            inputs = np.random.rand(3, 7, 9, 13, 5)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    expected[0][0] : expected[0][1],
                    expected[1][0] : expected[1][1],
                    expected[2][0] : expected[2][1],
                    :,
                ]
            )
        self.run_layer_test(
            layers.Cropping3D,
            init_kwargs={"cropping": cropping, "data_format": data_format},
            input_data=inputs,
            expected_output=expected_output,
        )

    def test_cropping_3d_with_dynamic_spatial_dim(self):
        # An unknown spatial dim stays unknown; known dims shrink by the
        # total cropping on that axis.
        if backend.config.image_data_format() == "channels_last":
            input_layer = layers.Input(batch_shape=(1, 7, None, 13, 5))
        else:
            input_layer = layers.Input(batch_shape=(1, 5, 7, None, 13))
        cropped = layers.Cropping3D(((1, 2), (3, 4), (5, 6)))(input_layer)
        if backend.config.image_data_format() == "channels_last":
            self.assertEqual(cropped.shape, (1, 4, None, 2, 5))
        else:
            self.assertEqual(cropped.shape, (1, 5, 4, None, 2))

    @parameterized.product(
        (
            {"cropping": ((3, 6), (0, 0), (0, 0))},
            {"cropping": ((0, 0), (5, 8), (0, 0))},
            {"cropping": ((0, 0), (0, 0), (7, 6))},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_cropping_3d_errors_if_cropping_more_than_available(
        self, cropping, data_format
    ):
        input_layer = layers.Input(batch_shape=(3, 7, 9, 13, 5))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=cropping, data_format=data_format)(
                input_layer
            )

    def test_cropping_3d_errors_if_cropping_argument_invalid(self):
        # Wrong arity, wrong types, and negative values are all rejected.
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=(1,))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=(1, 2))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=(1, 2, 3, 4))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping="1")
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=((1, 2), (3, 4), (5, 6, 7)))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=((1, 2), (3, 4), (5, -6)))
        with self.assertRaises(ValueError):
            layers.Cropping3D(cropping=((1, 2), (3, 4), "5"))

    @parameterized.product(
        (
            {"cropping": ((8, 1), (1, 1), (1, 1))},
            {"cropping": ((1, 1), (10, 1), (1, 1))},
            {"cropping": ((1, 1), (1, 1), (14, 1))},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_cropping_3d_with_excessive_cropping(self, cropping, data_format):
        # Cropping that consumes a whole axis must raise a clear error.
        if data_format == "channels_first":
            shape = (3, 5, 7, 9, 13)
            input_layer = layers.Input(batch_shape=shape)
        else:
            shape = (3, 7, 9, 13, 5)
            input_layer = layers.Input(batch_shape=shape)

        expected_error_msg = (
            "Values in `cropping` argument should be smaller than the"
        )
        with self.assertRaisesRegex(ValueError, expected_error_msg):
            layers.Cropping3D(cropping=cropping, data_format=data_format)(
                input_layer
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping1d_test.py | keras/src/layers/reshaping/cropping1d_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class Cropping1DTest(testing.TestCase):
    """Tests for `Cropping1D` (crops along the single spatial axis)."""

    @pytest.mark.requires_trainable_backend
    def test_cropping_1d(self):
        inputs = np.random.rand(3, 5, 7)

        # Cropping with different values on the left and the right.
        self.run_layer_test(
            layers.Cropping1D,
            init_kwargs={"cropping": (1, 2)},
            input_data=inputs,
            expected_output=ops.convert_to_tensor(inputs[:, 1:3, :]),
        )

        # Same cropping on the left and the right.
        self.run_layer_test(
            layers.Cropping1D,
            init_kwargs={"cropping": (1, 1)},
            input_data=inputs,
            expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
        )

        # Same cropping on the left and the right provided as an int.
        self.run_layer_test(
            layers.Cropping1D,
            init_kwargs={"cropping": 1},
            input_data=inputs,
            expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
        )

        # Cropping on the right only.
        self.run_layer_test(
            layers.Cropping1D,
            init_kwargs={"cropping": (0, 1)},
            input_data=inputs,
            expected_output=ops.convert_to_tensor(inputs[:, 0:4, :]),
        )

        # Cropping on the left only.
        self.run_layer_test(
            layers.Cropping1D,
            init_kwargs={"cropping": (1, 0)},
            input_data=inputs,
            expected_output=ops.convert_to_tensor(inputs[:, 1:5, :]),
        )

    @pytest.mark.requires_trainable_backend
    def test_cropping_1d_with_dynamic_spatial_dim(self):
        # An unknown spatial dim stays unknown in the output shape.
        input_layer = layers.Input(batch_shape=(1, None, 7))
        cropped = layers.Cropping1D((1, 2))(input_layer)
        self.assertEqual(cropped.shape, (1, None, 7))

    def test_cropping_1d_errors_if_cropping_argument_invalid(self):
        # Wrong arity and wrong types are rejected at construction time.
        with self.assertRaises(ValueError):
            layers.Cropping1D(cropping=(1,))
        with self.assertRaises(ValueError):
            layers.Cropping1D(cropping=(1, 2, 3))
        with self.assertRaises(ValueError):
            layers.Cropping1D(cropping="1")

    def test_cropping_1d_errors_if_cropping_more_than_available(self):
        # Symbolic call: cropping 2 + 3 >= 5 timesteps must fail.
        with self.assertRaisesRegex(
            ValueError,
            "`cropping` parameter of `Cropping1D` layer must be smaller than",
        ):
            input_layer = layers.Input(batch_shape=(3, 5, 7))
            layers.Cropping1D(cropping=(2, 3))(input_layer)

    def test_cropping_1d_error_on_excessive_cropping(self):
        # Eager call: cropping 3 + 3 >= 5 timesteps must fail too.
        inputs = np.random.rand(3, 5, 7)

        with self.assertRaisesRegex(
            ValueError,
            "`cropping` parameter of `Cropping1D` layer must be smaller than",
        ):
            layer = layers.Cropping1D(cropping=(3, 3))
            _ = layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/flatten_test.py | keras/src/layers/reshaping/flatten_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from conftest import skip_if_backend
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
class FlattenTest(testing.TestCase):
    """Tests for `keras.layers.Flatten` on dense and sparse inputs."""

    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sparse", "sparse": True},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_flatten(self, sparse):
        # Sparse mode only runs on backends with sparse-tensor support.
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            pytest.skip("Backend does not support sparse tensors.")

        inputs = np.random.random((10, 3, 5, 5)).astype("float32")
        # Make the ndarray relatively sparse
        inputs = np.multiply(inputs, inputs >= 0.8)
        expected_output_channels_last = ops.convert_to_tensor(
            np.reshape(inputs, (-1, 5 * 5 * 3))
        )
        # channels_first flattening is equivalent to moving the channel axis
        # last before reshaping.
        expected_output_channels_first = ops.convert_to_tensor(
            np.reshape(np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
        )
        if sparse:
            if backend.backend() == "tensorflow":
                import tensorflow as tf

                dense_to_sparse = tf.sparse.from_dense
            elif backend.backend() == "jax":
                import jax.experimental.sparse as jax_sparse

                dense_to_sparse = jax_sparse.BCOO.fromdense
            else:
                self.fail(
                    f"Sparse is unsupported with backend {backend.backend()}"
                )
            inputs = dense_to_sparse(inputs)
            expected_output_channels_last = dense_to_sparse(
                expected_output_channels_last
            )
            expected_output_channels_first = dense_to_sparse(
                expected_output_channels_first
            )

        # Test default data_format and channels_last
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={},
            input_data=inputs,
            input_sparse=True,
            expected_output=(
                expected_output_channels_last
                if backend.config.image_data_format() == "channels_last"
                else expected_output_channels_first
            ),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={"data_format": "channels_last"},
            input_data=inputs,
            input_sparse=True,
            expected_output=expected_output_channels_last,
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )

        # Test channels_first
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={"data_format": "channels_first"},
            input_data=inputs,
            input_sparse=True,
            expected_output=expected_output_channels_first,
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )

    @pytest.mark.requires_trainable_backend
    def test_flatten_with_scalar_channels(self):
        # 1D input (batch only): Flatten adds a trailing feature dimension
        # instead of removing anything, regardless of data_format.
        inputs = np.random.random((10,)).astype("float32")
        expected_output = ops.convert_to_tensor(np.expand_dims(inputs, -1))

        # Test default data_format and channels_last
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={},
            input_data=inputs,
            expected_output=expected_output,
        )
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={"data_format": "channels_last"},
            input_data=inputs,
            expected_output=expected_output,
        )

        # Test channels_first
        self.run_layer_test(
            layers.Flatten,
            init_kwargs={"data_format": "channels_first"},
            input_data=inputs,
            expected_output=expected_output,
        )

    def test_flatten_symbolic_with_dynamic_batch_size(self):
        input_layer = layers.Input(batch_shape=(None, 2, 3))
        flattened = layers.Flatten()(input_layer)
        self.assertEqual(flattened.shape, (None, 2 * 3))

    def test_flatten_symbolic_with_dynamic_dimension(self):
        # Any dynamic non-batch dim makes the flattened dim dynamic too.
        input_layer = layers.Input(batch_shape=(5, 2, None))
        flattened = layers.Flatten()(input_layer)
        self.assertEqual(flattened.shape, (5, None))

    @skip_if_backend("openvino", "Dynamic dimensions not supported by OpenVino")
    def test_flatten_with_dynamic_batch_size_and_dynamic_dimenstions(self):
        # Feed batches of differing shapes through predict() to exercise
        # fully dynamic tracing.
        def generator():
            yield (np.ones((3, 5, 7), dtype="float32"),)
            yield (np.ones((2, 7, 5), dtype="float32"),)

        model = models.Sequential([layers.Flatten()])
        model.predict(generator())
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling1d.py | keras/src/layers/reshaping/up_sampling1d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
    """Upsampling layer for 1D inputs.

    Each temporal step of the input is repeated `size` times along the
    time axis (axis 1).

    Example:

    >>> input_shape = (2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[ 0  1  2]
      [ 3  4  5]]
     [[ 6  7  8]
      [ 9 10 11]]]
    >>> y = keras.layers.UpSampling1D(size=2)(x)
    >>> y
    [[[ 0.  1.  2.]
      [ 0.  1.  2.]
      [ 3.  4.  5.]
      [ 3.  4.  5.]]
     [[ 6.  7.  8.]
      [ 6.  7.  8.]
      [ 9. 10. 11.]
      [ 9. 10. 11.]]]

    Args:
        size: Integer. Upsampling factor.

    Input shape:
        3D tensor with shape: `(batch_size, steps, features)`.

    Output shape:
        3D tensor with shape: `(batch_size, upsampled_steps, features)`.
    """

    def __init__(self, size=2, **kwargs):
        super().__init__(**kwargs)
        # Coerce to a plain int so serialization stays clean.
        self.size = int(size)
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        steps = input_shape[1]
        # A dynamic time dimension stays dynamic after upsampling.
        upsampled_steps = None if steps is None else steps * self.size
        return [input_shape[0], upsampled_steps, input_shape[2]]

    def call(self, inputs):
        # Repeat every step `size` times along the time axis.
        return ops.repeat(inputs, repeats=self.size, axis=1)

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "size": self.size}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/permute_test.py | keras/src/layers/reshaping/permute_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class PermuteTest(testing.TestCase):
    """Tests for `keras.layers.Permute` on dense and sparse inputs."""

    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sparse", "sparse": True},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_permute(self, sparse):
        # Sparse mode only runs on backends with sparse-tensor support.
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            pytest.skip("Backend does not support sparse tensors.")

        inputs = np.random.random((10, 3, 5, 5)).astype("float32")
        # Make the ndarray relatively sparse
        inputs = np.multiply(inputs, inputs >= 0.8)
        # `dims` is 1-indexed over non-batch axes; (3, 1, 2) corresponds to
        # np.transpose axes (0, 3, 1, 2) with the batch axis prepended.
        expected_output = ops.convert_to_tensor(
            np.transpose(inputs, axes=(0, 3, 1, 2))
        )
        if sparse:
            if backend.backend() == "tensorflow":
                import tensorflow as tf

                inputs = tf.sparse.from_dense(inputs)
                expected_output = tf.sparse.from_dense(expected_output)
            elif backend.backend() == "jax":
                import jax.experimental.sparse as jax_sparse

                inputs = jax_sparse.BCOO.fromdense(inputs)
                expected_output = jax_sparse.BCOO.fromdense(expected_output)
            else:
                self.fail(
                    f"Backend {backend.backend()} does not support sparse"
                )

        self.run_layer_test(
            layers.Permute,
            init_kwargs={"dims": (3, 1, 2)},
            input_data=inputs,
            input_sparse=sparse,
            expected_output=expected_output,
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )

    def test_permute_with_dynamic_batch_size(self):
        input_layer = layers.Input(batch_shape=(None, 3, 5))
        permuted = layers.Permute((2, 1))(input_layer)
        self.assertEqual(permuted.shape, (None, 5, 3))

    def test_permute_errors_on_invalid_starting_dims_index(self):
        # `dims` is 1-indexed; 0 refers to the batch axis and is invalid.
        with self.assertRaisesRegex(
            ValueError, r"Invalid permutation .*dims.*"
        ):
            self.run_layer_test(
                layers.Permute,
                init_kwargs={"dims": (0, 1, 2)},
                input_shape=(3, 2, 4),
            )

    def test_permute_errors_on_invalid_set_of_dims_indices(self):
        # `dims` must be a permutation of 1..rank-1; 4 is out of range here.
        with self.assertRaisesRegex(
            ValueError, r"Invalid permutation .*dims.*"
        ):
            self.run_layer_test(
                layers.Permute,
                init_kwargs={"dims": (1, 4, 2)},
                input_shape=(3, 2, 4),
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding1d.py | keras/src/layers/reshaping/zero_padding1d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.ZeroPadding1D")
class ZeroPadding1D(Layer):
    """Zero-padding layer for 1D input (e.g. temporal sequence).

    Example:

    >>> input_shape = (2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[ 0  1  2]
      [ 3  4  5]]
     [[ 6  7  8]
      [ 9 10 11]]]
    >>> y = keras.layers.ZeroPadding1D(padding=2)(x)
    >>> y
    [[[ 0  0  0]
      [ 0  0  0]
      [ 0  1  2]
      [ 3  4  5]
      [ 0  0  0]
      [ 0  0  0]]
     [[ 0  0  0]
      [ 0  0  0]
      [ 6  7  8]
      [ 9 10 11]
      [ 0  0  0]
      [ 0  0  0]]]

    Args:
        padding: Int, or tuple of int (length 2), or dictionary.
            - If int: how many zeros to add at the beginning and end of
              the padding dimension (axis 1).
            - If tuple of 2 ints: how many zeros to add at the beginning and
              the end of the padding dimension (`(left_pad, right_pad)`).
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, axis_to_pad, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch_size, channels, axis_to_pad)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        3D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, axis_to_pad, features)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, features, axis_to_pad)`

    Output shape:
        3D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, padded_axis, features)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, features, padded_axis)`
    """

    def __init__(self, padding=1, data_format=None, **kwargs):
        super().__init__(**kwargs)
        # Normalize `padding` into a (left, right) pair of non-negative ints.
        self.padding = argument_validation.standardize_tuple(
            padding, 2, "padding", allow_zero=True
        )
        self.data_format = backend.standardize_data_format(data_format)
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        # The padded axis depends on the data format; dynamic dims stay None.
        axis = 2 if self.data_format == "channels_first" else 1
        shape = list(input_shape)
        if shape[axis] is not None:
            shape[axis] += self.padding[0] + self.padding[1]
        return tuple(shape)

    def call(self, inputs):
        no_pad = (0, 0)
        if self.data_format == "channels_first":
            pad_width = (no_pad, no_pad, self.padding)
        else:
            pad_width = (no_pad, self.padding, no_pad)
        return ops.pad(inputs, pad_width)

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "padding": self.padding,
            "data_format": self.data_format,
        }
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding2d.py | keras/src/layers/reshaping/zero_padding2d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.ZeroPadding2D")
class ZeroPadding2D(Layer):
    """Zero-padding layer for 2D input (e.g. picture).

    This layer can add rows and columns of zeros at the top, bottom, left and
    right side of an image tensor.

    Example:

    >>> input_shape = (1, 1, 2, 2)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[[0 1]
       [2 3]]]]
    >>> y = keras.layers.ZeroPadding2D(padding=1)(x)
    >>> y
    [[[[0 0]
       [0 0]
       [0 0]
       [0 0]]
      [[0 0]
       [0 1]
       [2 3]
       [0 0]]
      [[0 0]
       [0 0]
       [0 0]
       [0 0]]]]

    Args:
        padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
            - If int: the same symmetric padding is applied to height and
              width.
            - If tuple of 2 ints: interpreted as two different symmetric
              padding values for height and width:
              `(symmetric_height_pad, symmetric_width_pad)`.
            - If tuple of 2 tuples of 2 ints: interpreted as
              `((top_pad, bottom_pad), (left_pad, right_pad))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch_size, channels, height, width)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, height, width, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, height, width)`

    Output shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, padded_height, padded_width, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, padded_height, padded_width)`

    Raises:
        ValueError: If `padding` is not an int, a tuple of 2 ints, or a
            tuple of 2 tuples of 2 ints.
    """

    def __init__(self, padding=(1, 1), data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        # Normalize `padding` into ((top, bottom), (left, right)).
        if isinstance(padding, int):
            self.padding = ((padding, padding), (padding, padding))
        elif hasattr(padding, "__len__"):
            if len(padding) != 2:
                raise ValueError(
                    "`padding` should have two elements. "
                    f"Received: padding={padding}."
                )
            height_padding = argument_validation.standardize_tuple(
                padding[0], 2, "1st entry of padding", allow_zero=True
            )
            width_padding = argument_validation.standardize_tuple(
                padding[1], 2, "2nd entry of padding", allow_zero=True
            )
            self.padding = (height_padding, width_padding)
        else:
            # Fixed error message: it previously used cropping terminology
            # ("symmetric_height_crop", "top_crop", ...) copy-pasted from
            # Cropping2D; this is a padding layer.
            raise ValueError(
                "`padding` should be either an int, a tuple of 2 ints "
                "(symmetric_height_pad, symmetric_width_pad), "
                "or a tuple of 2 tuples of 2 ints "
                "((top_pad, bottom_pad), (left_pad, right_pad)). "
                f"Received: padding={padding}."
            )
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        # Spatial dims start at axis 2 for channels_first, axis 1 otherwise.
        spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
        for index in range(0, 2):
            # Dynamic (None) dims stay dynamic.
            if output_shape[index + spatial_dims_offset] is not None:
                output_shape[index + spatial_dims_offset] += (
                    self.padding[index][0] + self.padding[index][1]
                )
        return tuple(output_shape)

    def call(self, inputs):
        # Batch and channel axes are never padded.
        if self.data_format == "channels_first":
            all_dims_padding = ((0, 0), (0, 0), *self.padding)
        else:
            all_dims_padding = ((0, 0), *self.padding, (0, 0))
        return ops.pad(inputs, all_dims_padding)

    def get_config(self):
        config = {"padding": self.padding, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping2d_test.py | keras/src/layers/reshaping/cropping2d_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class Cropping2DTest(testing.TestCase):
    """Tests for `keras.layers.Cropping2D`.

    Covers all accepted `cropping` formats in both data formats, dynamic
    spatial dimensions, and validation errors for malformed or excessive
    cropping arguments.
    """

    @parameterized.product(
        (
            # different cropping values
            {"cropping": ((1, 2), (3, 4)), "expected_ranges": ((1, 5), (3, 5))},
            # same cropping values with 2 tuples
            {"cropping": ((2, 2), (2, 2)), "expected_ranges": ((2, 5), (2, 7))},
            # same cropping values with 1 tuple
            {"cropping": (2, 2), "expected_ranges": ((2, 5), (2, 7))},
            # same cropping values with an integer
            {"cropping": 2, "expected_ranges": ((2, 5), (2, 7))},
            # cropping right only in both dimensions
            {"cropping": ((0, 2), (0, 4)), "expected_ranges": ((0, 5), (0, 5))},
            # cropping left only in both dimensions
            {"cropping": ((1, 0), (3, 0)), "expected_ranges": ((1, 7), (3, 9))},
            # cropping left only in rows dimension
            {"cropping": ((1, 0), (3, 4)), "expected_ranges": ((1, 7), (3, 5))},
            # cropping left only in cols dimension
            {"cropping": ((1, 2), (3, 0)), "expected_ranges": ((1, 5), (3, 9))},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    @pytest.mark.requires_trainable_backend
    def test_cropping_2d(self, cropping, data_format, expected_ranges):
        # Spatial dims are (7, 9) in both layouts; `expected_ranges` gives
        # the slice kept in each spatial dimension.
        if data_format == "channels_first":
            inputs = np.random.rand(3, 5, 7, 9)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    :,
                    expected_ranges[0][0] : expected_ranges[0][1],
                    expected_ranges[1][0] : expected_ranges[1][1],
                ]
            )
        else:
            inputs = np.random.rand(3, 7, 9, 5)
            expected_output = ops.convert_to_tensor(
                inputs[
                    :,
                    expected_ranges[0][0] : expected_ranges[0][1],
                    expected_ranges[1][0] : expected_ranges[1][1],
                    :,
                ]
            )
        self.run_layer_test(
            layers.Cropping2D,
            init_kwargs={"cropping": cropping, "data_format": data_format},
            input_data=inputs,
            expected_output=expected_output,
        )

    def test_cropping_2d_with_dynamic_spatial_dim(self):
        # A dynamic (None) spatial dim stays dynamic; static dims shrink by
        # the total crop.
        if backend.config.image_data_format() == "channels_last":
            input_layer = layers.Input(batch_shape=(1, 7, None, 5))
        else:
            input_layer = layers.Input(batch_shape=(1, 5, 7, None))
        cropped = layers.Cropping2D(((1, 2), (3, 4)))(input_layer)
        if backend.config.image_data_format() == "channels_last":
            self.assertEqual(cropped.shape, (1, 4, None, 5))
        else:
            self.assertEqual(cropped.shape, (1, 5, 4, None))

    @parameterized.product(
        (
            {"cropping": ((3, 6), (0, 0))},
            {"cropping": ((0, 0), (5, 4))},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_cropping_2d_errors_if_cropping_more_than_available(
        self, cropping, data_format
    ):
        # Total crop covers the whole spatial dim -> symbolic call raises.
        input_layer = layers.Input(batch_shape=(3, 7, 9, 5))
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=cropping, data_format=data_format)(
                input_layer
            )

    def test_cropping_2d_errors_if_cropping_argument_invalid(self):
        # `cropping` must be an int, a pair of ints, or a pair of pairs of
        # non-negative ints.
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=(1,))
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=(1, 2, 3))
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping="1")
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=((1, 2), (3, 4, 5)))
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=((1, 2), (3, -4)))
        with self.assertRaises(ValueError):
            layers.Cropping2D(cropping=((1, 2), "3"))

    @parameterized.product(
        (
            {"cropping": ((4, 5), (0, 0)), "input_shape": (3, 8, 9, 5)},
            {"cropping": ((0, 0), (5, 5)), "input_shape": (3, 8, 9, 5)},
            {"cropping": ((6, 3), (0, 0)), "input_shape": (3, 8, 9, 5)},
            {"cropping": ((0, 0), (7, 3)), "input_shape": (3, 8, 9, 5)},
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_cropping_2d_error_on_excessive_cropping(
        self, cropping, input_shape, data_format
    ):
        # Same validation must also trigger on eager (concrete) inputs.
        inputs = np.random.rand(*input_shape)

        with self.assertRaisesRegex(
            ValueError,
            "Values in `cropping` argument should be smaller than the "
            "corresponding spatial dimension of the input.",
        ):
            layer = layers.Cropping2D(
                cropping=cropping, data_format=data_format
            )
            _ = layer(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling3d_test.py | keras/src/layers/reshaping/up_sampling3d_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class UpSampling3dTest(testing.TestCase):
    """Tests for `keras.layers.UpSampling3D`."""

    @parameterized.product(
        data_format=["channels_first", "channels_last"],
        length_dim1=[2, 3],
        length_dim2=[2],
        length_dim3=[3],
    )
    @pytest.mark.requires_trainable_backend
    def test_upsampling_3d(
        self, data_format, length_dim1, length_dim2, length_dim3
    ):
        num_samples = 2
        stack_size = 2
        input_len_dim1 = 10
        input_len_dim2 = 11
        input_len_dim3 = 12

        if data_format == "channels_first":
            inputs = np.random.rand(
                num_samples,
                stack_size,
                input_len_dim1,
                input_len_dim2,
                input_len_dim3,
            )
        else:
            inputs = np.random.rand(
                num_samples,
                input_len_dim1,
                input_len_dim2,
                input_len_dim3,
                stack_size,
            )

        # basic test
        if data_format == "channels_first":
            expected_output_shape = (2, 2, 20, 22, 24)
        else:
            expected_output_shape = (2, 20, 22, 24, 2)
        self.run_layer_test(
            layers.UpSampling3D,
            init_kwargs={"size": (2, 2, 2), "data_format": data_format},
            input_shape=inputs.shape,
            expected_output_shape=expected_output_shape,
            expected_output_dtype="float32",
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )

        # Each spatial dim must be multiplied by its upsampling factor.
        layer = layers.UpSampling3D(
            size=(length_dim1, length_dim2, length_dim3),
            data_format=data_format,
        )
        layer.build(inputs.shape)
        np_output = layer(inputs=backend.Variable(inputs))
        if data_format == "channels_first":
            assert np_output.shape[2] == length_dim1 * input_len_dim1
            assert np_output.shape[3] == length_dim2 * input_len_dim2
            assert np_output.shape[4] == length_dim3 * input_len_dim3
        else:  # tf
            assert np_output.shape[1] == length_dim1 * input_len_dim1
            assert np_output.shape[2] == length_dim2 * input_len_dim2
            assert np_output.shape[3] == length_dim3 * input_len_dim3

        # compare with numpy
        if data_format == "channels_first":
            expected_out = np.repeat(inputs, length_dim1, axis=2)
            expected_out = np.repeat(expected_out, length_dim2, axis=3)
            expected_out = np.repeat(expected_out, length_dim3, axis=4)
        else:  # tf
            expected_out = np.repeat(inputs, length_dim1, axis=1)
            expected_out = np.repeat(expected_out, length_dim2, axis=2)
            expected_out = np.repeat(expected_out, length_dim3, axis=3)
        self.assertAllClose(np_output, expected_out)

    def test_upsampling_3d_correctness(self):
        # Exact-value check on a tiny channels_last fixture; transposed below
        # when the configured image data format is channels_first.
        input_shape = (2, 1, 2, 1, 3)
        x = np.arange(np.prod(input_shape)).reshape(input_shape)
        expected_output = np.array(
            [
                [
                    [
                        [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
                        [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
                        [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
                        [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
                    ],
                    [
                        [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
                        [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
                        [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
                        [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
                    ],
                ],
                [
                    [
                        [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
                        [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
                        [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
                        [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
                    ],
                    [
                        [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
                        [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
                        [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
                        [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
                    ],
                ],
            ]
        )
        if backend.config.image_data_format() == "channels_first":
            expected_output = expected_output.transpose((0, 4, 1, 2, 3))
            x = x.transpose((0, 4, 1, 2, 3))
        self.assertAllClose(
            layers.UpSampling3D(size=(2, 2, 2))(x), expected_output
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding3d_test.py | keras/src/layers/reshaping/zero_padding3d_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding3DTest(testing.TestCase):
    """Tests for `keras.layers.ZeroPadding3D`."""

    @parameterized.parameters(
        {"data_format": "channels_first"}, {"data_format": "channels_last"}
    )
    def test_zero_padding_3d(self, data_format):
        # Asymmetric padding ((1, 2), (3, 4), (0, 2)): verify the padded
        # border slices are all zero and the interior equals the input.
        inputs = np.random.rand(1, 2, 3, 4, 5)
        outputs = layers.ZeroPadding3D(
            padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, :, index, :], 0.0)
            for index in [-1, -2]:
                self.assertAllClose(outputs[:, :, :, :, index], 0.0)
            self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
        else:
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, index, :, :], 0.0)
            for index in [-1, -2]:
                self.assertAllClose(outputs[:, :, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)

    @parameterized.product(
        (
            {"padding": ((2, 2), (2, 2), (2, 2))},  # 3 tuples
            {"padding": (2, 2, 2)},  # 1 tuple
            {"padding": 2},  # 1 int
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_zero_padding_3d_with_same_padding(self, padding, data_format):
        # All three `padding` spellings above must behave identically.
        inputs = np.random.rand(1, 2, 3, 4, 5)
        outputs = layers.ZeroPadding3D(
            padding=padding, data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :, :], 0.0)
                self.assertAllClose(outputs[:, :, :, index, :], 0.0)
                self.assertAllClose(outputs[:, :, :, :, index], 0.0)
            self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
        else:
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :, :], 0.0)
                self.assertAllClose(outputs[:, :, index, :, :], 0.0)
                self.assertAllClose(outputs[:, :, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)

    def test_zero_padding_3d_with_dynamic_spatial_dim(self):
        # A dynamic (None) spatial dim stays dynamic; static dims grow by
        # the total padding.
        if backend.config.image_data_format() == "channels_last":
            input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
        else:
            input_layer = layers.Input(batch_shape=(1, 5, 2, None, 4))
        padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
        if backend.config.image_data_format() == "channels_last":
            self.assertEqual(padded.shape, (1, 5, None, 15, 5))
        else:
            self.assertEqual(padded.shape, (1, 5, 5, None, 15))

    @parameterized.parameters(
        {"padding": (1,)},
        {"padding": (1, 2)},
        {"padding": (1, 2, 3, 4)},
        {"padding": "1"},
        {"padding": ((1, 2), (3, 4), (5, 6, 7))},
        {"padding": ((1, 2), (3, 4), (5, -6))},
        {"padding": ((1, 2), (3, 4), "5")},
    )
    def test_zero_padding_3d_errors_if_padding_argument_invalid(self, padding):
        # `padding` must be an int, 3 ints, or 3 pairs of non-negative ints.
        with self.assertRaises(ValueError):
            layers.ZeroPadding3D(padding=padding)

    @parameterized.parameters(
        {"data_format": "channels_first"},
        {"data_format": "channels_last"},
    )
    def test_zero_padding_3d_get_config(self, data_format):
        # Scalar-per-axis padding is serialized in normalized pair form.
        layer = layers.ZeroPadding3D(padding=(1, 2, 3), data_format=data_format)
        expected_config = {
            "data_format": data_format,
            "dtype": dtype_policies.serialize(layer.dtype_policy),
            "name": layer.name,
            "padding": ((1, 1), (2, 2), (3, 3)),
            "trainable": layer.trainable,
        }
        self.assertEqual(layer.get_config(), expected_config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/reshape.py | keras/src/layers/reshaping/reshape.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.ops import operation_utils
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
    """Layer that reshapes inputs into the given shape.

    Args:
        target_shape: Target shape. Tuple of integers, does not include the
            samples dimension (batch size). One element of the `target_shape`
            can be -1 in which case the missing value is inferred from the
            size of the array and remaining dimensions.

    Input shape:
        Arbitrary, but required to be compatible with `target_shape`.

    Output shape:
        `(batch_size, *target_shape)`

    Example:

    >>> x = keras.Input(shape=(12,))
    >>> y = keras.layers.Reshape((3, 4))(x)
    >>> y.shape
    (None, 3, 4)

    >>> # another example with shape inference using `-1` as dimension
    >>> y = keras.layers.Reshape((-1, 2, 2))(x)
    >>> y.shape
    (None, 3, 2, 2)
    """

    def __init__(self, target_shape, **kwargs):
        super().__init__(**kwargs)
        target_shape = tuple(target_shape)
        # At most one dimension may be left for inference.
        if target_shape.count(-1) > 1:
            raise ValueError(
                "The `target_shape` argument must not contain more than one "
                f"`-1` value. Received: target_shape={target_shape}"
            )
        self.target_shape = target_shape
        self.built = True

    def compute_output_shape(self, input_shape):
        # Resolve any -1 entry against the non-batch input dims, then
        # prepend the (possibly dynamic) batch dim.
        resolved = operation_utils.compute_reshape_output_shape(
            input_shape[1:], self.target_shape, "target_shape"
        )
        return (input_shape[0],) + tuple(resolved)

    def compute_output_spec(self, inputs):
        # Sparseness is preserved through a reshape.
        return KerasTensor(
            shape=self.compute_output_shape(inputs.shape),
            dtype=inputs.dtype,
            sparse=inputs.sparse,
        )

    def call(self, inputs):
        resolved = operation_utils.compute_reshape_output_shape(
            tuple(inputs.shape)[1:], self.target_shape, "target_shape"
        )
        # Backends expect -1 (not None) for a still-unknown dimension.
        concrete = tuple(-1 if dim is None else dim for dim in resolved)
        batch_size = ops.shape(inputs)[0]
        return ops.reshape(inputs, (batch_size,) + concrete)

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "target_shape": self.target_shape}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/repeat_vector.py | keras/src/layers/reshaping/repeat_vector.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.RepeatVector")
class RepeatVector(Layer):
    """Repeats the input n times.

    Example:

    >>> x = keras.Input(shape=(32,))
    >>> y = keras.layers.RepeatVector(3)(x)
    >>> y.shape
    (None, 3, 32)

    Args:
        n: Integer, repetition factor.

    Input shape:
        2D tensor with shape `(batch_size, features)`.

    Output shape:
        3D tensor with shape `(batch_size, n, features)`.

    Raises:
        TypeError: If `n` is not an integer.
    """

    def __init__(self, n, **kwargs):
        super().__init__(**kwargs)
        # Validate before storing: previously `self.n = n` was assigned
        # before the type check, leaving an invalid value on the instance
        # when the TypeError fired.
        if not isinstance(n, int):
            raise TypeError(
                f"Expected an integer value for `n`, got {type(n)}."
            )
        self.n = n
        self.input_spec = InputSpec(ndim=2)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.n, input_shape[1])

    def call(self, inputs):
        # Insert a length-1 time axis, then tile it `n` times.
        input_shape = ops.shape(inputs)
        reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1]))
        return ops.repeat(reshaped, self.n, axis=1)

    def get_config(self):
        config = {"n": self.n}
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling2d_test.py | keras/src/layers/reshaping/up_sampling2d_test.py | # flake8: noqa
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.backend import set_image_data_format
class UpSampling2dTest(testing.TestCase):
    """Unit tests for `keras.layers.UpSampling2D`."""

    @classmethod
    def setUpClass(cls):
        # Snapshot the global image data format so tests that mutate it
        # (via `set_image_data_format` below) can be undone afterwards.
        cls.original_image_data_format = backend.image_data_format()

    @classmethod
    def tearDownClass(cls):
        # Restore the global state mutated by the bilinear test variants.
        backend.set_image_data_format(cls.original_image_data_format)

    @parameterized.product(
        data_format=["channels_first", "channels_last"],
        length_row=[2],
        length_col=[2, 3],
    )
    @pytest.mark.requires_trainable_backend
    def test_upsampling_2d(self, data_format, length_row, length_col):
        # Fixed-size random input; shape layout depends on data_format.
        num_samples = 2
        stack_size = 2
        input_num_row = 11
        input_num_col = 12
        if data_format == "channels_first":
            inputs = np.random.rand(
                num_samples, stack_size, input_num_row, input_num_col
            )
        else:
            inputs = np.random.rand(
                num_samples, input_num_row, input_num_col, stack_size
            )
        # basic test
        self.run_layer_test(
            layers.UpSampling2D,
            init_kwargs={"size": (2, 2), "data_format": data_format},
            input_shape=inputs.shape,
        )
        layer = layers.UpSampling2D(
            size=(length_row, length_col),
            data_format=data_format,
        )
        layer.build(inputs.shape)
        np_output = layer(inputs=backend.Variable(inputs))
        # Spatial dims must be scaled by the upsampling factors.
        if data_format == "channels_first":
            assert np_output.shape[2] == length_row * input_num_row
            assert np_output.shape[3] == length_col * input_num_col
        else:
            assert np_output.shape[1] == length_row * input_num_row
            assert np_output.shape[2] == length_col * input_num_col
        # compare with numpy: nearest upsampling is exactly np.repeat
        # along each spatial axis.
        if data_format == "channels_first":
            expected_out = np.repeat(inputs, length_row, axis=2)
            expected_out = np.repeat(expected_out, length_col, axis=3)
        else:
            expected_out = np.repeat(inputs, length_row, axis=1)
            expected_out = np.repeat(expected_out, length_col, axis=2)
        self.assertAllClose(np_output, expected_out)

    @parameterized.product(
        data_format=["channels_first", "channels_last"],
        use_set_image_data_format=[True, False],
        length_row=[2],
        length_col=[2, 3],
    )
    @pytest.mark.requires_trainable_backend
    def test_upsampling_2d_bilinear(
        self, data_format, use_set_image_data_format, length_row, length_col
    ):
        num_samples = 2
        stack_size = 2
        input_num_row = 11
        input_num_col = 12
        # Optionally set the data format globally instead of (in addition
        # to) passing it as a constructor argument; tearDownClass restores.
        if use_set_image_data_format:
            set_image_data_format(data_format)
        if data_format == "channels_first":
            inputs = np.random.rand(
                num_samples, stack_size, input_num_row, input_num_col
            )
        else:
            inputs = np.random.rand(
                num_samples, input_num_row, input_num_col, stack_size
            )
        self.run_layer_test(
            layers.UpSampling2D,
            init_kwargs={
                "size": (2, 2),
                "data_format": data_format,
                "interpolation": "bilinear",
            },
            input_shape=inputs.shape,
        )
        layer = layers.UpSampling2D(
            size=(length_row, length_col),
            data_format=data_format,
            interpolation="bilinear",
        )
        layer.build(inputs.shape)
        np_output = layer(inputs=backend.Variable(inputs))
        # Only shapes are checked here: bilinear values are
        # backend-dependent, unlike the nearest-neighbor test above.
        if data_format == "channels_first":
            self.assertEqual(np_output.shape[2], length_row * input_num_row)
            self.assertEqual(np_output.shape[3], length_col * input_num_col)
        else:
            self.assertEqual(np_output.shape[1], length_row * input_num_row)
            self.assertEqual(np_output.shape[2], length_col * input_num_col)

    def test_upsampling_2d_correctness(self):
        # Exact-value check with size=(1, 2): each column is duplicated.
        input_shape = (2, 2, 1, 3)
        x = np.arange(np.prod(input_shape)).reshape(input_shape)
        # fmt: off
        expected_output = np.array(
            [[[[ 0.,  1.,  2.],
               [ 0.,  1.,  2.]],
              [[ 3.,  4.,  5.],
               [ 3.,  4.,  5.]]],
             [[[ 6.,  7.,  8.],
               [ 6.,  7.,  8.]],
              [[ 9., 10., 11.],
               [ 9., 10., 11.]]]]
        )
        # fmt: on
        if backend.config.image_data_format() == "channels_first":
            expected_output = expected_output.transpose((0, 3, 1, 2))
            x = x.transpose((0, 3, 1, 2))
        self.assertAllClose(
            layers.UpSampling2D(size=(1, 2))(x), expected_output
        )

    def test_upsampling_2d_various_interpolation_methods(self):
        # Smoke test: these interpolations must run on every backend.
        input_shape = (2, 2, 1, 3)
        x = np.arange(np.prod(input_shape)).reshape(input_shape)
        for interpolation in ["nearest", "bilinear", "bicubic"]:
            layers.UpSampling2D(size=(1, 2), interpolation=interpolation)(x)

    @pytest.mark.skipif(
        backend.backend() == "torch", reason="Torch does not support lanczos."
    )
    def test_upsampling_2d_lanczos_interpolation_methods(self):
        # Lanczos kernels are unavailable on the torch backend, hence skip.
        input_shape = (2, 2, 1, 3)
        x = np.arange(np.prod(input_shape)).reshape(input_shape)
        for interpolation in ["lanczos3", "lanczos5"]:
            layers.UpSampling2D(size=(1, 2), interpolation=interpolation)(x)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/reshape_test.py | keras/src/layers/reshaping/reshape_test.py | import pytest
from absl.testing import parameterized
from keras.src import Sequential
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
class ReshapeTest(testing.TestCase):
    """Unit tests for `keras.layers.Reshape`."""

    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sparse", "sparse": True},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_reshape(self, sparse):
        # The sparse variant only runs on backends with sparse support;
        # sparse tensors also cannot be trained, hence run_training_check
        # is disabled for them below.
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            pytest.skip("Backend does not support sparse tensors.")
        # Expand trailing unit dim: (2, 4) -> (8, 1).
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (8, 1)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 8, 1),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        # Flatten: (2, 4) -> (8,).
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (8,)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 8),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        # Unflatten: (8,) -> (2, 4).
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (2, 4)},
            input_shape=(3, 8),
            input_sparse=sparse,
            expected_output_shape=(3, 2, 4),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        # -1 wildcard in various positions must be resolved statically.
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (-1, 1)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 8, 1),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (1, -1)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 1, 8),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (-1,)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 8),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )
        self.run_layer_test(
            layers.Reshape,
            init_kwargs={"target_shape": (2, -1)},
            input_shape=(3, 2, 4),
            input_sparse=sparse,
            expected_output_shape=(3, 2, 4),
            expected_output_sparse=sparse,
            run_training_check=not sparse,
        )

    def test_reshape_with_dynamic_batch_size(self):
        # The (unknown) batch dim must be carried through unchanged.
        input_layer = layers.Input(shape=(2, 4))
        reshaped = layers.Reshape((8,))(input_layer)
        self.assertEqual(reshaped.shape, (None, 8))

    def test_reshape_with_dynamic_batch_size_and_minus_one(self):
        # -1 is resolved from the known non-batch dims: 6*4 / 8 == 3.
        input = KerasTensor((None, 6, 4))
        layer = layers.Reshape((-1, 8))
        reshaped = backend.compute_output_spec(layer.__call__, input)
        self.assertEqual(reshaped.shape, (None, 3, 8))

    def test_reshape_layer_with_varying_input_size_and_minus_one(self):
        # The same layer instance must handle differently-sized inputs
        # when a -1 wildcard is present.
        layer = layers.Reshape((-1, 8))
        res = layer(ops.ones((1, 6, 4), dtype="float32"))
        self.assertEqual(res.shape, (1, 3, 8))
        res = layer(ops.ones((1, 10, 4), dtype="float32"))
        self.assertEqual(res.shape, (1, 5, 8))

    def test_reshape_with_dynamic_dim_and_minus_one(self):
        # A dynamic (None) spatial dim makes the -1 dim dynamic too.
        input = KerasTensor((4, 6, None, 3))
        layer = layers.Reshape((-1, 3))
        reshaped = backend.compute_output_spec(layer.__call__, input)
        self.assertEqual(reshaped.shape, (4, None, 3))

    def test_reshape_sets_static_shape(self):
        input_layer = layers.Input(batch_shape=(2, None))
        reshaped = layers.Reshape((3, 5))(input_layer)
        # Also make sure the batch dim is not lost after reshape.
        self.assertEqual(reshaped.shape, (2, 3, 5))

    @pytest.mark.requires_trainable_backend
    def test_reshape_model_fit_with_varying_input_size_and_minus_one(self):
        # End-to-end: fit() must accept batches whose -1 dim differs.
        def generator():
            yield (
                ops.ones((1, 12, 2), dtype="float32"),
                ops.zeros((1, 3, 8), dtype="float32"),
            )
            yield (
                ops.ones((1, 20, 2), dtype="float32"),
                ops.zeros((1, 5, 8), dtype="float32"),
            )

        layer = layers.Reshape((-1, 8))
        model = Sequential([layer])
        model.compile(loss="mean_squared_error")
        model.fit(generator(), steps_per_epoch=2, epochs=1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling2d.py | keras/src/layers/reshaping/up_sampling2d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.UpSampling2D")
class UpSampling2D(Layer):
    """Upsampling layer for 2D inputs.

    The implementation uses interpolative resizing, given the resize method
    (specified by the `interpolation` argument). Use `interpolation=nearest`
    to repeat the rows and columns of the data.

    Example:

    >>> input_shape = (2, 2, 1, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> print(x)
    [[[[ 0  1  2]]
      [[ 3  4  5]]]
     [[[ 6  7  8]]
      [[ 9 10 11]]]]
    >>> y = keras.layers.UpSampling2D(size=(1, 2))(x)
    >>> print(y)
    [[[[ 0  1  2]
       [ 0  1  2]]
      [[ 3  4  5]
       [ 3  4  5]]]
     [[[ 6  7  8]
       [ 6  7  8]]
      [[ 9 10 11]
       [ 9 10 11]]]]

    Args:
        size: Int, or tuple of 2 integers.
            The upsampling factors for rows and columns.
        data_format: A string,
            one of `"channels_last"` (default) or `"channels_first"`.
            The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch_size, channels, height, width)`.
            When unspecified, uses
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json` (if exists) else `"channels_last"`.
            Defaults to `"channels_last"`.
        interpolation: A string, one of `"bicubic"`, `"bilinear"`,
            `"lanczos3"`, `"lanczos5"`, `"nearest"`.

    Input shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, rows, cols, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, rows, cols)`

    Output shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, upsampled_rows, upsampled_cols, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, upsampled_rows, upsampled_cols)`
    """

    def __init__(
        self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs
    ):
        super().__init__(**kwargs)
        self.size = argument_validation.standardize_tuple(size, 2, "size")
        self.data_format = backend.standardize_data_format(data_format)
        self.interpolation = interpolation.lower()
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # A dynamic (None) spatial dim stays dynamic; otherwise it is
        # multiplied by the corresponding upsampling factor.
        def scaled(dim, factor):
            return None if dim is None else factor * dim

        if self.data_format == "channels_first":
            batch, channels, rows, cols = input_shape
            return (
                batch,
                channels,
                scaled(rows, self.size[0]),
                scaled(cols, self.size[1]),
            )
        batch, rows, cols, channels = input_shape
        return (
            batch,
            scaled(rows, self.size[0]),
            scaled(cols, self.size[1]),
            channels,
        )

    def call(self, inputs):
        return self._resize_images(
            inputs,
            self.size[0],
            self.size[1],
            self.data_format,
            interpolation=self.interpolation,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "size": self.size,
                "data_format": self.data_format,
                "interpolation": self.interpolation,
            }
        )
        return config

    def _resize_images(
        self,
        x,
        height_factor,
        width_factor,
        data_format,
        interpolation="nearest",
    ):
        """Resizes the images contained in a 4D tensor.

        Args:
            x: Tensor or variable to resize.
            height_factor: Positive integer.
            width_factor: Positive integer.
            data_format: One of `"channels_first"`, `"channels_last"`.
            interpolation: A string, one of `"bicubic"`, `"bilinear"`,
                `"lanczos3"`, `"lanczos5"`, or `"nearest"`.

        Returns:
            A tensor.
        """
        if data_format not in ("channels_last", "channels_first"):
            raise ValueError(f"Invalid `data_format` argument: {data_format}")
        channels_first = data_format == "channels_first"
        if channels_first:
            # Work in channels-last internally; transpose back at the end.
            x = ops.transpose(x, [0, 2, 3, 1])
        if interpolation == "nearest":
            # https://github.com/keras-team/keras/issues/294
            # Use `ops.repeat` for `nearest` interpolation to enable XLA.
            x = ops.repeat(x, height_factor, axis=1)
            x = ops.repeat(x, width_factor, axis=2)
        else:
            # Compute each target dim separately rather than via array
            # arithmetic + `.tolist()`, which breaks under torchdynamo
            # tracing (shape entries become `FakeTensor`s).
            shape = ops.shape(x)
            target_shape = (
                shape[1] * height_factor,
                shape[2] * width_factor,
            )
            x = ops.image.resize(
                x,
                target_shape,
                data_format="channels_last",
                interpolation=interpolation,
            )
        if channels_first:
            x = ops.transpose(x, [0, 3, 1, 2])
        return x
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/__init__.py | keras/src/layers/reshaping/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping1d.py | keras/src/layers/reshaping/cropping1d.py | from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.Cropping1D")
class Cropping1D(Layer):
    """Cropping layer for 1D input (e.g. temporal sequence).

    It crops along the time dimension (axis 1).

    Example:

    >>> input_shape = (2, 3, 2)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[ 0  1]
      [ 2  3]
      [ 4  5]]
     [[ 6  7]
      [ 8  9]
      [10 11]]]
    >>> y = keras.layers.Cropping1D(cropping=1)(x)
    >>> y
    [[[2 3]]
     [[8 9]]]

    Args:
        cropping: Int, or tuple of int (length 2), or dictionary.
            - If int: how many units should be trimmed off at the beginning and
              end of the cropping dimension (axis 1).
            - If tuple of 2 ints: how many units should be trimmed off at the
              beginning and end of the cropping dimension
              (`(left_crop, right_crop)`).

    Input shape:
        3D tensor with shape `(batch_size, axis_to_crop, features)`

    Output shape:
        3D tensor with shape `(batch_size, cropped_axis, features)`
    """

    def __init__(self, cropping=(1, 1), **kwargs):
        super().__init__(**kwargs)
        # Normalize int / 2-tuple input to a canonical (left, right) pair.
        self.cropping = argument_validation.standardize_tuple(
            cropping, 2, "cropping", allow_zero=True
        )
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        length = input_shape[1]
        if length is not None:
            length = length - self.cropping[0] - self.cropping[1]
            if length <= 0:
                raise ValueError(
                    "`cropping` parameter of `Cropping1D` layer must be "
                    "smaller than the input length. Received: input_shape="
                    f"{input_shape}, cropping={self.cropping}"
                )
        return (input_shape[0], length, input_shape[2])

    def call(self, inputs):
        steps = inputs.shape[1]
        if steps is not None and sum(self.cropping) >= steps:
            raise ValueError(
                "`cropping` parameter of `Cropping1D` layer must be "
                "smaller than the input length. Received: inputs.shape="
                f"{inputs.shape}, cropping={self.cropping}"
            )
        start, end = self.cropping
        # A zero right-crop maps to `None` so the slice reaches the end
        # of the axis (`-0` would produce an empty result).
        return inputs[:, start : -end if end else None, :]

    def get_config(self):
        config = super().get_config()
        config["cropping"] = self.cropping
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping3d.py | keras/src/layers/reshaping/cropping3d.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.Cropping3D")
class Cropping3D(Layer):
    """Cropping layer for 3D data (e.g. spatial or spatio-temporal).

    Example:

    >>> input_shape = (2, 28, 28, 10, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
    >>> y.shape
    (2, 24, 20, 6, 3)

    Args:
        cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
            - If int: the same symmetric cropping is applied to depth, height,
              and width.
            - If tuple of 3 ints: interpreted as three different symmetric
              cropping values for depth, height, and width:
              `(symmetric_dim1_crop, symmetric_dim2_crop,
              symmetric_dim3_crop)`.
            - If tuple of 3 tuples of 2 ints: interpreted as
              `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
              right_dim2_crop), (left_dim3_crop, right_dim3_crop))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults to
            `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, first_axis_to_crop, second_axis_to_crop,
            third_axis_to_crop, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, first_axis_to_crop, second_axis_to_crop,
            third_axis_to_crop)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, first_cropped_axis, second_cropped_axis,
            third_cropped_axis, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, first_cropped_axis, second_cropped_axis,
            third_cropped_axis)`
    """

    def __init__(
        self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(cropping, int):
            if cropping < 0:
                raise ValueError(
                    "`cropping` cannot be negative. "
                    f"Received: cropping={cropping}."
                )
            # Same symmetric crop on all three spatial dims.
            self.cropping = (
                (cropping, cropping),
                (cropping, cropping),
                (cropping, cropping),
            )
        elif hasattr(cropping, "__len__"):
            if len(cropping) != 3:
                raise ValueError(
                    f"`cropping` should have 3 elements. Received: {cropping}."
                )
            # Each entry may itself be an int (symmetric) or a 2-tuple.
            dim1_cropping = argument_validation.standardize_tuple(
                cropping[0], 2, "1st entry of cropping", allow_zero=True
            )
            dim2_cropping = argument_validation.standardize_tuple(
                cropping[1], 2, "2nd entry of cropping", allow_zero=True
            )
            dim3_cropping = argument_validation.standardize_tuple(
                cropping[2], 2, "3rd entry of cropping", allow_zero=True
            )
            self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
        else:
            # Fixed message typo: the third tuple previously read
            # "right_dim2_crop" instead of "right_dim3_crop".
            raise ValueError(
                "`cropping` should be either an int, a tuple of 3 ints "
                "(symmetric_dim1_crop, symmetric_dim2_crop, "
                "symmetric_dim3_crop), "
                "or a tuple of 3 tuples of 2 ints "
                "((left_dim1_crop, right_dim1_crop),"
                " (left_dim2_crop, right_dim2_crop),"
                " (left_dim3_crop, right_dim3_crop)). "
                f"Received: {cropping}."
            )
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        if self.data_format == "channels_first":
            spatial_dims = list(input_shape[2:5])
        else:
            spatial_dims = list(input_shape[1:4])
        for index in range(0, 3):
            if spatial_dims[index] is None:
                # Dynamic dims stay dynamic; they are validated in call().
                continue
            spatial_dims[index] -= sum(self.cropping[index])
            if spatial_dims[index] <= 0:
                raise ValueError(
                    "Values in `cropping` argument should be smaller than the "
                    "corresponding spatial dimension of the input. Received: "
                    f"input_shape={input_shape}, cropping={self.cropping}"
                )
        if self.data_format == "channels_first":
            return (input_shape[0], input_shape[1], *spatial_dims)
        else:
            return (input_shape[0], *spatial_dims, input_shape[4])

    def call(self, inputs):
        # Validate that the crop leaves at least one element per static dim.
        if self.data_format == "channels_first":
            spatial_dims = list(inputs.shape[2:5])
        else:
            spatial_dims = list(inputs.shape[1:4])
        for index in range(0, 3):
            if spatial_dims[index] is None:
                continue
            spatial_dims[index] -= sum(self.cropping[index])
            if spatial_dims[index] <= 0:
                raise ValueError(
                    "Values in `cropping` argument should be smaller than the "
                    "corresponding spatial dimension of the input. Received: "
                    f"inputs.shape={inputs.shape}, cropping={self.cropping}"
                )
        # Build one slice per spatial axis instead of enumerating all
        # 2^3 combinations of zero/non-zero right crops. A zero right-crop
        # maps to `None` so the slice reaches the end of the axis
        # (a literal `-0` stop would yield an empty slice).
        spatial_slices = tuple(
            slice(start, -end if end else None)
            for start, end in self.cropping
        )
        if self.data_format == "channels_first":
            # Keep batch and channel axes untouched.
            return inputs[(slice(None), slice(None)) + spatial_slices]
        return inputs[(slice(None),) + spatial_slices + (slice(None),)]

    def get_config(self):
        config = {"cropping": self.cropping, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling3d.py | keras/src/layers/reshaping/up_sampling3d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.UpSampling3D")
class UpSampling3D(Layer):
    """Upsampling layer for 3D inputs.

    Repeats the 1st, 2nd and 3rd dimensions
    of the data by `size[0]`, `size[1]` and `size[2]` respectively.

    Example:

    >>> input_shape = (2, 1, 2, 1, 3)
    >>> x = np.ones(input_shape)
    >>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x)
    >>> y.shape
    (2, 2, 4, 2, 3)

    Args:
        size: Int, or tuple of 3 integers.
            The upsampling factors for dim1, dim2 and dim3.
        data_format: A string,
            one of `"channels_last"` (default) or `"channels_first"`.
            The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json` (if exists) else `"channels_last"`.
            Defaults to `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, dim1, dim2, dim3, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, dim1, dim2, dim3)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,
            channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, upsampled_dim1, upsampled_dim2,
            upsampled_dim3)`
    """

    def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.size = argument_validation.standardize_tuple(size, 3, "size")
        self.data_format = backend.standardize_data_format(data_format)
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        # A dynamic (None) spatial dim stays dynamic; otherwise it is
        # multiplied by the matching upsampling factor.
        def scaled(dim, factor):
            return None if dim is None else factor * dim

        if self.data_format == "channels_first":
            batch, channels, dim1, dim2, dim3 = input_shape
            return (
                batch,
                channels,
                scaled(dim1, self.size[0]),
                scaled(dim2, self.size[1]),
                scaled(dim3, self.size[2]),
            )
        batch, dim1, dim2, dim3, channels = input_shape
        return (
            batch,
            scaled(dim1, self.size[0]),
            scaled(dim2, self.size[1]),
            scaled(dim3, self.size[2]),
            channels,
        )

    def call(self, inputs):
        return self._resize_volumes(
            inputs, self.size[0], self.size[1], self.size[2], self.data_format
        )

    def get_config(self):
        config = super().get_config()
        config.update({"size": self.size, "data_format": self.data_format})
        return config

    def _resize_volumes(
        self, x, depth_factor, height_factor, width_factor, data_format
    ):
        """Resizes the volume contained in a 5D tensor.

        Args:
            x: Tensor or variable to resize.
            depth_factor: Positive integer.
            height_factor: Positive integer.
            width_factor: Positive integer.
            data_format: One of `"channels_first"`, `"channels_last"`.

        Returns:
            Resized tensor.
        """
        # The three spatial axes start at axis 2 (channels_first) or
        # axis 1 (channels_last); repeat along each of them in turn.
        if data_format == "channels_first":
            first_spatial_axis = 2
        elif data_format == "channels_last":
            first_spatial_axis = 1
        else:
            raise ValueError(f"Invalid data_format: {data_format}")
        output = x
        factors = (depth_factor, height_factor, width_factor)
        for offset, factor in enumerate(factors):
            output = ops.repeat(output, factor, axis=first_spatial_axis + offset)
        return output
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding3d.py | keras/src/layers/reshaping/zero_padding3d.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.ZeroPadding3D")
class ZeroPadding3D(Layer):
    """Zero-padding layer for 3D data (spatial or spatio-temporal).

    Example:

    >>> input_shape = (1, 1, 2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> y = keras.layers.ZeroPadding3D(padding=2)(x)
    >>> y.shape
    (1, 5, 6, 6, 3)

    Args:
        padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
            - If int: the same symmetric padding is applied to depth, height,
              and width.
            - If tuple of 3 ints: interpreted as three different symmetric
              padding values for depth, height, and width:
              `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
            - If tuple of 3 tuples of 2 ints: interpreted as
              `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
              right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults to
            `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, first_axis_to_pad, second_axis_to_pad,
            third_axis_to_pad, depth)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
            third_axis_to_pad)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, first_padded_axis, second_padded_axis,
            third_padded_axis, depth)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, depth, first_padded_axis, second_padded_axis,
            third_padded_axis)`
    """

    def __init__(
        self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(padding, int):
            # Same symmetric padding on all three spatial dims.
            self.padding = (
                (padding, padding),
                (padding, padding),
                (padding, padding),
            )
        elif hasattr(padding, "__len__"):
            if len(padding) != 3:
                raise ValueError(
                    f"`padding` should have 3 elements. Received: {padding}."
                )
            # Each entry may itself be an int (symmetric) or a 2-tuple.
            dim1_padding = argument_validation.standardize_tuple(
                padding[0], 2, "1st entry of padding", allow_zero=True
            )
            dim2_padding = argument_validation.standardize_tuple(
                padding[1], 2, "2nd entry of padding", allow_zero=True
            )
            dim3_padding = argument_validation.standardize_tuple(
                padding[2], 2, "3rd entry of padding", allow_zero=True
            )
            self.padding = (dim1_padding, dim2_padding, dim3_padding)
        else:
            # Fixed message typo: the third tuple previously read
            # "right_dim2_pad" instead of "right_dim3_pad".
            raise ValueError(
                "`padding` should be either an int, a tuple of 3 ints "
                "(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), "
                "or a tuple of 3 tuples of 2 ints "
                "((left_dim1_pad, right_dim1_pad),"
                " (left_dim2_pad, right_dim2_pad),"
                " (left_dim3_pad, right_dim3_pad)). "
                f"Received: padding={padding}."
            )
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        # Spatial dims start at axis 2 (channels_first) or axis 1
        # (channels_last); dynamic (None) dims stay dynamic.
        spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
        for index in range(0, 3):
            if output_shape[index + spatial_dims_offset] is not None:
                output_shape[index + spatial_dims_offset] += (
                    self.padding[index][0] + self.padding[index][1]
                )
        return tuple(output_shape)

    def call(self, inputs):
        # Batch and channel axes are never padded.
        if self.data_format == "channels_first":
            all_dims_padding = ((0, 0), (0, 0), *self.padding)
        else:
            all_dims_padding = ((0, 0), *self.padding, (0, 0))
        return ops.pad(inputs, all_dims_padding)

    def get_config(self):
        config = {"padding": self.padding, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/flatten.py | keras/src/layers/reshaping/flatten.py | import math
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Flatten")
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
Args:
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Flatten()(x)
>>> y.shape
(None, 640)
"""
def __init__(self, data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
self._channels_first = self.data_format == "channels_first"
def call(self, inputs):
input_shape = ops.shape(inputs)
rank = len(input_shape)
if self._channels_first and rank > 1:
# Switch to channels-last format.
inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif any(not isinstance(d, int) for d in non_batch_dims):
flattened_dim = -1
else:
flattened_dim = math.prod(non_batch_dims)
return ops.reshape(inputs, (input_shape[0], flattened_dim))
def compute_output_shape(self, input_shape):
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif any(d is None for d in non_batch_dims):
# NB: we cannot use the shorter `None in non_batch_dims` here b/c
# torchdynamo errors when calling `__contains__` op with
# a constant (in this case `None`) operand since it assumes
# that the elements in the collection are also `ConstantVariable`s
# but tensor shapes can be `SymNodeVariable`s (e.g. `SymInt`)
flattened_dim = None
else:
flattened_dim = math.prod(non_batch_dims)
return (input_shape[0], flattened_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def get_config(self):
config = {"data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/cropping2d.py | keras/src/layers/reshaping/cropping2d.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.Cropping2D")
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Example:
>>> input_shape = (2, 28, 28, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
>>> y.shape
(2, 24, 20, 3)
Args:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping is applied to height and
width.
- If tuple of 2 ints: interpreted as two different symmetric
cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints: interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, height, width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, height, width)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, cropped_height, cropped_width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, cropped_height, cropped_width)`
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(cropping, int):
if cropping < 0:
raise ValueError(
"`cropping` cannot be negative. "
f"Received: cropping={cropping}."
)
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, "__len__"):
if len(cropping) != 2:
raise ValueError(
"`cropping` should have two elements. "
f"Received: cropping={cropping}."
)
height_cropping = argument_validation.standardize_tuple(
cropping[0], 2, "1st entry of cropping", allow_zero=True
)
width_cropping = argument_validation.standardize_tuple(
cropping[1], 2, "2nd entry of cropping", allow_zero=True
)
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError(
"`cropping` should be either an int, a tuple of 2 ints "
"(symmetric_height_crop, symmetric_width_crop), "
"or a tuple of 2 tuples of 2 ints "
"((top_crop, bottom_crop), (left_crop, right_crop)). "
f"Received: cropping={cropping}."
)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
if (
input_shape[2] is not None
and sum(self.cropping[0]) >= input_shape[2]
) or (
input_shape[3] is not None
and sum(self.cropping[1]) >= input_shape[3]
):
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
return (
input_shape[0],
input_shape[1],
(
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] is not None
else None
),
(
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] is not None
else None
),
)
else:
if (
input_shape[1] is not None
and sum(self.cropping[0]) >= input_shape[1]
) or (
input_shape[2] is not None
and sum(self.cropping[1]) >= input_shape[2]
):
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
return (
input_shape[0],
(
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] is not None
else None
),
(
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] is not None
else None
),
input_shape[3],
)
def call(self, inputs):
if self.data_format == "channels_first":
if (
inputs.shape[2] is not None
and sum(self.cropping[0]) >= inputs.shape[2]
) or (
inputs.shape[3] is not None
and sum(self.cropping[1]) >= inputs.shape[3]
):
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:, :, self.cropping[0][0] :, self.cropping[1][0] :
]
elif self.cropping[0][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
]
elif self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
]
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
]
else:
if (
inputs.shape[1] is not None
and sum(self.cropping[0]) >= inputs.shape[1]
) or (
inputs.shape[2] is not None
and sum(self.cropping[1]) >= inputs.shape[2]
):
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:, self.cropping[0][0] :, self.cropping[1][0] :, :
]
elif self.cropping[0][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
:,
]
elif self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
:,
]
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
:,
]
def get_config(self):
config = {"cropping": self.cropping, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/permute.py | keras/src/layers/reshaping/permute.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful e.g. connecting RNNs and convnets.
Args:
dims: Tuple of integers. Permutation pattern does not include the
batch dimension. Indexing starts at 1.
For instance, `(1, 3, 2)` permutes the second and third dimensions
of the input.
Input shape:
Arbitrary.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Permute((2, 1))(x)
>>> y.shape
(None, 64, 10)
"""
def __init__(self, dims, **kwargs):
super().__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
"Invalid permutation argument `dims` for Permute Layer. "
"The set of indices in `dims` must be consecutive and start "
f"from 1. Received dims={dims}"
)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
for dim in self.dims:
output_shape.append(input_shape[dim])
return tuple(output_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.transpose(inputs, axes=(0,) + self.dims)
def get_config(self):
config = {"dims": self.dims}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/zero_padding1d_test.py | keras/src/layers/reshaping/zero_padding1d_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding1DTest(testing.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_1d(self, data_format):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=(1, 2), data_format=data_format)(
inputs
)
if data_format == "channels_last":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, :], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2], inputs)
@parameterized.named_parameters(("one_tuple", (2, 2)), ("one_int", 2))
def test_zero_padding_1d_with_same_padding(self, padding):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(
padding=padding, data_format="channels_last"
)(inputs)
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, :], inputs)
def test_zero_padding_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 3))
padded = layers.ZeroPadding1D((1, 2), data_format="channels_last")(
input_layer
)
self.assertEqual(padded.shape, (1, None, 3))
input_layer = layers.Input(batch_shape=(1, 2, 3))
padded = layers.ZeroPadding1D((1, 2), data_format="channels_last")(
input_layer
)
self.assertEqual(padded.shape, (1, 5, 3))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
)
def test_zero_padding_1d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_1d_get_config(self, data_format):
layer = layers.ZeroPadding1D(padding=(1, 2), data_format=data_format)
expected_config = {
"dtype": dtype_policies.serialize(layer.dtype_policy),
"data_format": data_format,
"name": layer.name,
"padding": (1, 2),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/repeat_vector_test.py | keras/src/layers/reshaping/repeat_vector_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class FlattenTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_repeat_vector(self):
inputs = np.random.random((2, 5)).astype("float32")
expected_output = ops.convert_to_tensor(
np.repeat(np.reshape(inputs, (2, 1, 5)), 3, axis=1)
)
self.run_layer_test(
layers.RepeatVector,
init_kwargs={"n": 3},
input_data=inputs,
expected_output=expected_output,
)
def test_repeat_vector_with_dynamic_batch_size(self):
input_layer = layers.Input(batch_shape=(None, 5))
repeated = layers.RepeatVector(n=3)(input_layer)
self.assertEqual(repeated.shape, (None, 3, 5))
def test_repeat_vector_with_dynamic_dimension(self):
input_layer = layers.Input(batch_shape=(2, None))
repeated = layers.RepeatVector(n=3)(input_layer)
self.assertEqual(repeated.shape, (2, 3, None))
def test_repeat_vector_with_invalid_n(self):
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n="3")
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n=3.5)
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n=[3])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/reshaping/up_sampling1d_test.py | keras/src/layers/reshaping/up_sampling1d_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
class UpSamplingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_upsampling_1d(self):
self.run_layer_test(
layers.UpSampling1D,
init_kwargs={"size": 2},
input_shape=(3, 5, 4),
expected_output_shape=(3, 10, 4),
expected_output_dtype="float32",
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_upsampling_1d_correctness(self):
self.assertAllClose(
layers.UpSampling1D(size=2)(np.arange(12).reshape((2, 2, 3))),
np.array(
[
[
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[3.0, 4.0, 5.0],
],
[
[6.0, 7.0, 8.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0],
[9.0, 10.0, 11.0],
],
]
),
)
def test_upsampling_1d_correctness_with_ones(self):
self.assertAllClose(
layers.UpSampling1D(size=3)(np.ones((2, 1, 5))), np.ones((2, 3, 5))
)
def test_upsampling_1d_with_dynamic_batch_size(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(layers.UpSampling1D(size=2)(x).shape, (None, 4, 3))
self.assertEqual(layers.UpSampling1D(size=4)(x).shape, (None, 8, 3))
def test_upsampling_1d_with_dynamic_shape(self):
y = KerasTensor([2, None, 3])
self.assertEqual(layers.UpSampling1D(size=2)(y).shape, (2, None, 3))
self.assertEqual(layers.UpSampling1D(size=4)(y).shape, (2, None, 3))
z = KerasTensor([2, 3, None])
self.assertEqual(layers.UpSampling1D(size=2)(z).shape, (2, 6, None))
self.assertEqual(layers.UpSampling1D(size=4)(z).shape, (2, 12, None))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/text_vectorization.py | keras/src/layers/preprocessing/text_vectorization.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.layers.preprocessing.index_lookup import listify_tensors
from keras.src.layers.preprocessing.string_lookup import StringLookup
from keras.src.saving import serialization_lib
from keras.src.utils import argument_validation
from keras.src.utils import backend_utils
from keras.src.utils import tf_utils
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.TextVectorization")
class TextVectorization(Layer):
"""A preprocessing layer which maps text features to integer sequences.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one example = one string) into either a list
of token indices (one example = 1D tensor of integer token indices) or a
dense representation (one example = 1D tensor of float values representing
data about the example's tokens). This layer is meant to handle natural
language inputs. To handle simple string inputs (categorical strings or
pre-tokenized strings) see `kers_core.layers.StringLookup`.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. When this layer is adapted, it will analyze the
dataset, determine the frequency of individual string values, and create a
vocabulary from them. This vocabulary can have unlimited size or be capped,
depending on the configuration options for this layer; if there are more
unique values in the input than the maximum vocabulary size, the most
frequent terms will be used to create the vocabulary.
The processing of each example contains the following steps:
1. Standardize each example (usually lowercasing + punctuation stripping)
2. Split each example into substrings (usually words)
3. Recombine substrings into tokens (usually ngrams)
4. Index tokens (associate a unique int value with each token)
5. Transform each example using this index, either into a vector of ints or
a dense float vector.
Some notes on passing callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `keras.saving.register_keras_serializable`
for more details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`.
The callable should return a `tf.Tensor` of dtype `string`
with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to",
"split"], ["another", "string", "to", "split"]]`.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting a vocabulary or when setting
`pad_to_max_tokens=True`. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is
`(max_tokens - 1 - (1 if output_mode == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be:
- `None`: No standardization.
- `"lower_and_strip_punctuation"`: Text will be lowercased and all
punctuation removed.
- `"lower"`: Text will be lowercased.
- `"strip_punctuation"`: All punctuation will be removed.
- Callable: Inputs will passed to the callable function,
which should be standardized and returned.
split: Optional specification for splitting the input text.
Values can be:
- `None`: No splitting.
- `"whitespace"`: Split on whitespace.
- `"character"`: Split on each unicode character.
- Callable: Standardized inputs will passed to the callable
function, which should be split and returned.
ngrams: Optional specification for ngrams to create from the
possibly-split input text. Values can be `None`, an integer
or tuple of integers; passing an integer will create ngrams
up to that integer, and passing a tuple of integers will
create ngrams for the specified values in the tuple.
Passing `None` means that no ngrams will be created.
output_mode: Optional specification for the output of the layer.
Values can be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`,
configuring the layer as follows:
- `"int"`: Outputs integer indices, one integer index per split
string token. When `output_mode == "int"`,
0 is reserved for masked locations;
this reduces the vocab size to `max_tokens - 2`
instead of `max_tokens - 1`.
- `"multi_hot"`: Outputs a single int array per batch, of either
vocab_size or max_tokens size, containing 1s in all elements
where the token mapped to that index exists at least
once in the batch item.
- `"count"`: Like `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the batch item.
- `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only rank 1 inputs
(and rank 2 outputs after splitting) are supported.
output_sequence_length: Only valid in INT mode. If set, the output will
have its time dimension padded or truncated to exactly
`output_sequence_length` values, resulting in a tensor of shape
`(batch_size, output_sequence_length)` regardless of how many tokens
resulted from the splitting step. Defaults to `None`. If `ragged`
is `True` then `output_sequence_length` may still truncate the
output.
pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`,
and `"tf_idf"` modes. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
vocabulary: Optional. Either an array of strings or a string path to a
text file. If passing an array, can pass a tuple, list,
1D NumPy array, or 1D tensor containing the string vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
1D NumPy array, or 1D tensor of the same length as the vocabulary,
containing the floating point inverse document frequency weights,
which will be multiplied by per sample term counts for
the final `tf_idf` weight. If the `vocabulary` argument is set,
and `output_mode` is `"tf_idf"`, this argument must be supplied.
ragged: Boolean. Only applicable to `"int"` output mode.
Only supported with TensorFlow backend.
If `True`, returns a `RaggedTensor` instead of a dense `Tensor`,
where each sequence may have a different length
after string splitting. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
encoding: Optional. The text encoding to use to interpret the input
strings. Defaults to `"utf-8"`.
Examples:
This example instantiates a `TextVectorization` layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> max_tokens = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len)
>>> # Now that the vocab layer has been created, call `adapt` on the
>>> # list of strings to create the vocabulary.
>>> vectorize_layer.adapt(["foo bar", "bar baz", "baz bada boom"])
>>> # Now, the layer can map strings to integers -- you can use an
>>> # embedding layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> vectorize_layer(input_data)
array([[4, 1, 3, 0],
[1, 2, 0, 0]])
This example instantiates a `TextVectorization` layer by passing a list
of vocabulary terms to the layer's `__init__()` method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = keras.layers.TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]')
>>> # as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
def __init__(
self,
max_tokens=None,
standardize="lower_and_strip_punctuation",
split="whitespace",
ngrams=None,
output_mode="int",
output_sequence_length=None,
pad_to_max_tokens=False,
vocabulary=None,
idf_weights=None,
sparse=False,
ragged=False,
encoding="utf-8",
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer TextVectorization requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse=True` can only be used with the TensorFlow backend."
)
if ragged and backend.backend() != "tensorflow":
raise ValueError(
"`ragged=True` can only be used with the TensorFlow backend."
)
# 'standardize' must be one of
# (None, "lower_and_strip_punctuation", "lower", "strip_punctuation",
# callable)
argument_validation.validate_string_arg(
standardize,
allowable_strings=(
"lower_and_strip_punctuation",
"lower",
"strip_punctuation",
),
caller_name=self.__class__.__name__,
arg_name="standardize",
allow_none=True,
allow_callables=True,
)
# 'split' must be one of (None, "whitespace", "character", callable)
argument_validation.validate_string_arg(
split,
allowable_strings=("whitespace", "character"),
caller_name=self.__class__.__name__,
arg_name="split",
allow_none=True,
allow_callables=True,
)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
if output_mode == "tf-idf":
output_mode = "tf_idf"
argument_validation.validate_string_arg(
output_mode,
allowable_strings=(
"int",
"one_hot",
"multi_hot",
"count",
"tf_idf",
),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
# 'ngrams' must be one of (None, int, tuple(int))
if not (
ngrams is None
or isinstance(ngrams, int)
or isinstance(ngrams, tuple)
and all(isinstance(item, int) for item in ngrams)
):
raise ValueError(
"`ngrams` must be None, an integer, or a tuple of "
f"integers. Received: ngrams={ngrams}"
)
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is "int"".
if output_mode == "int" and not (
isinstance(output_sequence_length, int)
or (output_sequence_length is None)
):
raise ValueError(
"`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. Received: "
f"output_sequence_length={output_sequence_length}"
)
if output_mode != "int" and output_sequence_length is not None:
raise ValueError(
"`output_sequence_length` must not be set if `output_mode` is "
"not 'int'. "
f"Received output_sequence_length={output_sequence_length}."
)
if ragged and output_mode != "int":
raise ValueError(
"`ragged` must not be true if `output_mode` is "
f"`'int'`. Received: ragged={ragged} and "
f"output_mode={output_mode}"
)
self._max_tokens = max_tokens
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._ragged = ragged
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._encoding = encoding
# We save this hidden option to persist the fact
# that we have a non-adaptable layer with a
# manually set vocab.
self._has_input_vocabulary = kwargs.pop(
"has_input_vocabulary", (vocabulary is not None)
)
vocabulary_size = kwargs.pop("vocabulary_size", None)
super().__init__(name=name, **kwargs)
self._lookup_layer = StringLookup(
max_tokens=max_tokens,
vocabulary=vocabulary,
idf_weights=idf_weights,
pad_to_max_tokens=pad_to_max_tokens,
mask_token="",
output_mode=output_mode,
sparse=sparse,
has_input_vocabulary=self._has_input_vocabulary,
encoding=encoding,
vocabulary_size=vocabulary_size,
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
@property
def compute_dtype(self):
return "string"
@property
def variable_dtype(self):
return "string"
def build(self, input_shape=None):
pass
def compute_output_shape(self, input_shape):
if self._output_mode == "int":
return (input_shape[0], self._output_sequence_length)
if self._split is None:
if len(input_shape) <= 1:
input_shape = tuple(input_shape) + (1,)
else:
input_shape = tuple(input_shape) + (None,)
return self._lookup_layer.compute_output_shape(input_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
if self._output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
return backend.KerasTensor(output_shape, dtype=output_dtype)
    def adapt(self, data, batch_size=None, steps=None):
        """Computes a vocabulary of string terms from tokens in a dataset.
        Calling `adapt()` on a `TextVectorization` layer is an alternative to
        passing in a precomputed vocabulary on construction via the `vocabulary`
        argument. A `TextVectorization` layer should always be either adapted
        over a dataset or supplied with a vocabulary.
        During `adapt()`, the layer will build a vocabulary of all string tokens
        seen in the dataset, sorted by occurrence count, with ties broken by
        sort order of the tokens (high to low). At the end of `adapt()`, if
        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
        size. For example, adapting a layer with `max_tokens=1000` will compute
        the 1000 most frequent tokens occurring in the input dataset. If
        `output_mode='tf-idf'`, `adapt()` will also learn the document
        frequencies of each token in the input dataset.
        Arguments:
            data: The data to train on. It can be passed either as a
                batched `tf.data.Dataset`, as a list of strings,
                or as a NumPy array.
            batch_size: Not used by this implementation; accepted so the
                signature matches other adaptable layers.
            steps: Integer or `None`.
                Total number of steps (batches of samples) to process.
                If `data` is a `tf.data.Dataset`, and `steps` is `None`,
                `adapt()` will run until the input dataset is exhausted.
                When passing an infinitely
                repeating dataset, you must specify the `steps` argument. This
                argument is not supported with array inputs or list inputs.
        """
        # Discard any previously accumulated vocabulary state first.
        self.reset_state()
        if isinstance(data, tf.data.Dataset):
            if steps is not None:
                data = data.take(steps)
            for batch in data:
                self.update_state(batch)
        else:
            data = tf_utils.ensure_tensor(data, dtype="string")
            if data.shape.rank == 1:
                # A plain list of strings is treated as that many
                # separate documents.
                data = tf.expand_dims(data, -1)
            self.update_state(data)
        self.finalize_state()
    def update_state(self, data):
        # Accumulate vocabulary statistics from one batch of raw strings.
        self._lookup_layer.update_state(self._preprocess(data))
    def finalize_state(self):
        # Freeze accumulated statistics into the lookup vocabulary.
        self._lookup_layer.finalize_state()
    def reset_state(self):
        # Clear any accumulated adaptation state.
        self._lookup_layer.reset_state()
    def get_vocabulary(self, include_special_tokens=True):
        """Returns the current vocabulary of the layer.
        Args:
            include_special_tokens: If `True`, the returned vocabulary
                will include the padding and OOV tokens,
                and a term's index in the vocabulary will equal
                the term's index when calling the layer. If `False`, the
                returned vocabulary will not include any padding
                or OOV tokens.
        """
        # Vocabulary state is owned entirely by the inner lookup layer.
        return self._lookup_layer.get_vocabulary(include_special_tokens)
    def vocabulary_size(self):
        """Gets the current size of the layer's vocabulary.
        Returns:
            The integer size of the vocabulary, including optional
            mask and OOV indices.
        """
        # Delegates to the inner lookup layer, which owns the vocabulary.
        return self._lookup_layer.vocabulary_size()
    def get_config(self):
        """Returns the layer config as a JSON-serializable dict.

        Tensor-valued vocabulary/IDF state is converted to plain Python
        lists via `listify_tensors` so the config can be serialized.
        """
        config = {
            "max_tokens": self._lookup_layer.max_tokens,
            "standardize": self._standardize,
            "split": self._split,
            "ngrams": self._ngrams_arg,
            "output_mode": self._output_mode,
            "output_sequence_length": self._output_sequence_length,
            "pad_to_max_tokens": self._lookup_layer.pad_to_max_tokens,
            "sparse": self._lookup_layer.sparse,
            "ragged": self._ragged,
            "vocabulary": listify_tensors(self._lookup_layer.input_vocabulary),
            "idf_weights": listify_tensors(
                self._lookup_layer.input_idf_weights
            ),
            "encoding": self._encoding,
            "vocabulary_size": self.vocabulary_size(),
        }
        base_config = super().get_config()
        return {**base_config, **config}
@classmethod
def from_config(cls, config):
if not isinstance(config["standardize"], str):
config["standardize"] = serialization_lib.deserialize_keras_object(
config["standardize"]
)
if not isinstance(config["split"], str):
config["split"] = serialization_lib.deserialize_keras_object(
config["split"]
)
if isinstance(config["ngrams"], list):
config["ngrams"] = tuple(config["ngrams"])
return cls(**config)
    def set_vocabulary(self, vocabulary, idf_weights=None):
        """Sets vocabulary (and optionally document frequency) for this layer.
        This method sets the vocabulary and IDF weights for this layer directly,
        instead of analyzing a dataset through `adapt()`. It should be used
        whenever the vocab (and optionally document frequency) information is
        already known. If vocabulary data is already present in the layer, this
        method will replace it.
        Args:
            vocabulary: Either an array or a string path to a text file.
                If passing an array, can pass a tuple, list, 1D NumPy array,
                or 1D tensor containing the vocabulary terms.
                If passing a file path, the file should contain one line
                per term in the vocabulary.
            idf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse
                document frequency weights with equal length to vocabulary.
                Must be set if `output_mode` is `"tf_idf"`.
                Should not be set otherwise.
        """
        # All vocabulary state is held by the inner lookup layer.
        self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
    def _preprocess(self, inputs):
        """Apply standardization, splitting, and n-gram creation to `inputs`.

        Order matters: lowercasing happens before punctuation stripping,
        splitting happens after standardization, and n-grams are computed
        last on the (possibly ragged) split tokens.
        """
        inputs = tf_utils.ensure_tensor(inputs, dtype=tf.string)
        if self._standardize in ("lower", "lower_and_strip_punctuation"):
            inputs = tf.strings.lower(inputs)
        if self._standardize in (
            "strip_punctuation",
            "lower_and_strip_punctuation",
        ):
            inputs = tf.strings.regex_replace(
                inputs, r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']', ""
            )
        if callable(self._standardize):
            inputs = self._standardize(inputs)
        if self._split is not None:
            # If we are splitting, we validate that the 1st axis is of dimension
            # 1 and so can be squeezed out. We do this here instead of after
            # splitting for performance reasons - it's more expensive to squeeze
            # a ragged tensor.
            if inputs.shape.rank > 1:
                if inputs.shape[-1] != 1:
                    raise ValueError(
                        "When using `TextVectorization` to tokenize strings, "
                        "the input rank must be 1 or the last shape dimension "
                        f"must be 1. Received: inputs.shape={inputs.shape} "
                        f"with rank={inputs.shape.rank}"
                    )
                else:
                    inputs = tf.squeeze(inputs, axis=-1)
            if self._split == "whitespace":
                # This treats multiple whitespaces as one whitespace, and strips
                # leading and trailing whitespace.
                inputs = tf.strings.split(inputs)
            elif self._split == "character":
                inputs = tf.strings.unicode_split(inputs, "UTF-8")
            elif callable(self._split):
                inputs = self._split(inputs)
        # Note that 'inputs' here can be either ragged or dense depending on the
        # configuration choices for this Layer. The strings.ngrams op, however,
        # does support both ragged and dense inputs.
        if self._ngrams is not None:
            inputs = tf.strings.ngrams(
                inputs, ngram_width=self._ngrams, separator=" "
            )
        return inputs
def call(self, inputs):
if not isinstance(
inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
outputs = inputs
lookup_data = self._lookup_layer.call(inputs)
# For non-int output, we can return directly from the underlying layer.
if self._output_mode != "int":
return backend_utils.convert_tf_tensor(lookup_data)
# If we have a ragged tensor, we can pad during the conversion to dense.
if isinstance(lookup_data, tf.RaggedTensor) and not self._ragged:
shape = lookup_data.shape.as_list()
# If output sequence length is None, to_tensor will pad the last
# dimension to the bounding shape of the ragged dimension.
shape[-1] = self._output_sequence_length
outputs = lookup_data.to_tensor(default_value=0, shape=shape)
# If we have a dense tensor, we need to pad/trim directly.
elif self._output_sequence_length is not None:
# Maybe trim the output.
outputs = lookup_data[..., : self._output_sequence_length]
# Maybe pad the output. We need to be careful to use dynamic shape
# here as required_space_to_batch_paddings requires a fully known
# shape.
if not self._ragged:
shape = tf.shape(outputs)
padded_shape = tf.concat(
(shape[:-1], [self._output_sequence_length]), 0
)
padding, _ = tf.required_space_to_batch_paddings(
shape, padded_shape
)
outputs = tf.pad(outputs, padding)
# Because `tf.pad` used a dynamic shape, the output shape is
# dynamic. Apply the known static `_output_sequence_length`.
static_padded_shape = lookup_data.shape.as_list()
static_padded_shape[-1] = self._output_sequence_length
outputs.set_shape(static_padded_shape)
else:
outputs = lookup_data
return backend_utils.convert_tf_tensor(outputs)
    def save_own_variables(self, store):
        # Variable state (vocabulary/IDF tables) is owned by the lookup layer.
        self._lookup_layer.save_own_variables(store)
    def load_own_variables(self, store):
        self._lookup_layer.load_own_variables(store)
    def save_assets(self, dir_path):
        # Vocabulary files are saved/restored as model assets.
        self._lookup_layer.save_assets(dir_path)
    def load_assets(self, dir_path):
        self._lookup_layer.load_assets(dir_path)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/feature_space.py | keras/src/layers/preprocessing/feature_space.py | from keras.src import backend
from keras.src import layers
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.saving import saving_lib
from keras.src.saving import serialization_lib
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import backend_utils
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.naming import auto_name
class Cross(KerasSaveable):
    """Serializable specification of a feature cross.

    Records which feature names are crossed together, the dimensionality
    of the hashing space, and how the crossed value is encoded.
    """

    def __init__(self, feature_names, crossing_dim, output_mode="one_hot"):
        allowed_modes = {"int", "one_hot"}
        if output_mode not in allowed_modes:
            raise ValueError(
                "Invalid value for argument `output_mode`. "
                "Expected one of {'int', 'one_hot'}. "
                f"Received: output_mode={output_mode}"
            )
        self.feature_names = tuple(feature_names)
        self.crossing_dim = crossing_dim
        self.output_mode = output_mode

    def _obj_type(self):
        return "Cross"

    @property
    def name(self):
        # Deterministic name derived from the crossed feature names.
        return "_X_".join(self.feature_names)

    def get_config(self):
        return dict(
            feature_names=self.feature_names,
            crossing_dim=self.crossing_dim,
            output_mode=self.output_mode,
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
class Feature(KerasSaveable):
    """Serializable specification pairing a preprocessor with an output mode.

    Accepts either a layer instance or a serialized layer config as the
    preprocessor; configs are deserialized on construction.
    """

    def __init__(self, dtype, preprocessor, output_mode):
        allowed_modes = {"int", "one_hot", "float"}
        if output_mode not in allowed_modes:
            raise ValueError(
                "Invalid value for argument `output_mode`. "
                "Expected one of {'int', 'one_hot', 'float'}. "
                f"Received: output_mode={output_mode}"
            )
        self.dtype = dtype
        # A dict here is a serialized layer config; rebuild the layer.
        if isinstance(preprocessor, dict):
            preprocessor = serialization_lib.deserialize_keras_object(
                preprocessor
            )
        self.preprocessor = preprocessor
        self.output_mode = output_mode

    def _obj_type(self):
        return "Feature"

    def get_config(self):
        return dict(
            dtype=self.dtype,
            preprocessor=serialization_lib.serialize_keras_object(
                self.preprocessor
            ),
            output_mode=self.output_mode,
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
@keras_export("keras.utils.FeatureSpace")
class FeatureSpace(Layer):
"""One-stop utility for preprocessing and encoding structured data.
Arguments:
        features: Dict mapping the names of your features to their
type specification, e.g. `{"my_feature": "integer_categorical"}`
or `{"my_feature": FeatureSpace.integer_categorical()}`.
For a complete list of all supported types, see
"Available feature types" paragraph below.
output_mode: One of `"concat"` or `"dict"`. In concat mode, all
features get concatenated together into a single vector.
In dict mode, the FeatureSpace returns a dict of individually
encoded features (with the same keys as the input dict keys).
crosses: List of features to be crossed together, e.g.
`crosses=[("feature_1", "feature_2")]`. The features will be
"crossed" by hashing their combined value into
a fixed-length vector.
crossing_dim: Default vector size for hashing crossed features.
Defaults to `32`.
hashing_dim: Default vector size for hashing features of type
`"integer_hashed"` and `"string_hashed"`. Defaults to `32`.
num_discretization_bins: Default number of bins to be used for
discretizing features of type `"float_discretized"`.
Defaults to `32`.
**Available feature types:**
Note that all features can be referred to by their string name,
e.g. `"integer_categorical"`. When using the string name, the default
argument values are used.
```python
# Plain float values.
FeatureSpace.float(name=None)
# Float values to be preprocessed via featurewise standardization
# (i.e. via a `keras.layers.Normalization` layer).
FeatureSpace.float_normalized(name=None)
# Float values to be preprocessed via linear rescaling
# (i.e. via a `keras.layers.Rescaling` layer).
FeatureSpace.float_rescaled(scale=1., offset=0., name=None)
# Float values to be discretized. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.float_discretized(
num_bins, bin_boundaries=None, output_mode="one_hot", name=None)
# Integer values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.integer_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# String values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.string_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# Integer values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.integer_hashed(num_bins, output_mode="one_hot", name=None)
# String values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.string_hashed(num_bins, output_mode="one_hot", name=None)
```
Examples:
**Basic usage with a dict of input data:**
```python
raw_data = {
"float_values": [0.0, 0.1, 0.2, 0.3],
"string_values": ["zero", "one", "two", "three"],
"int_values": [0, 1, 2, 3],
}
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
feature_space = FeatureSpace(
features={
"float_values": "float_normalized",
"string_values": "string_categorical",
"int_values": "integer_categorical",
},
crosses=[("string_values", "int_values")],
output_mode="concat",
)
# Before you start using the FeatureSpace,
# you must `adapt()` it on some data.
feature_space.adapt(dataset)
# You can call the FeatureSpace on a dict of data (batched or unbatched).
output_vector = feature_space(raw_data)
```
**Basic usage with `tf.data`:**
```python
# Unlabeled data
preprocessed_ds = unlabeled_dataset.map(feature_space)
# Labeled data
preprocessed_ds = labeled_dataset.map(lambda x, y: (feature_space(x), y))
```
**Basic usage with the Keras Functional API:**
```python
# Retrieve a dict Keras Input objects
inputs = feature_space.get_inputs()
# Retrieve the corresponding encoded Keras tensors
encoded_features = feature_space.get_encoded_features()
# Build a Functional model
outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features)
model = keras.Model(inputs, outputs)
```
**Customizing each feature or feature cross:**
```python
feature_space = FeatureSpace(
features={
"float_values": FeatureSpace.float_normalized(),
"string_values": FeatureSpace.string_categorical(max_tokens=10),
"int_values": FeatureSpace.integer_categorical(max_tokens=10),
},
crosses=[
FeatureSpace.cross(("string_values", "int_values"), crossing_dim=32)
],
output_mode="concat",
)
```
**Returning a dict of integer-encoded features:**
```python
feature_space = FeatureSpace(
features={
"string_values": FeatureSpace.string_categorical(output_mode="int"),
"int_values": FeatureSpace.integer_categorical(output_mode="int"),
},
crosses=[
FeatureSpace.cross(
feature_names=("string_values", "int_values"),
crossing_dim=32,
output_mode="int",
)
],
output_mode="dict",
)
```
**Specifying your own Keras preprocessing layer:**
```python
# Let's say that one of the features is a short text paragraph that
# we want to encode as a vector (one vector per paragraph) via TF-IDF.
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
# There's a Keras layer for this: TextVectorization.
custom_layer = layers.TextVectorization(output_mode="tf_idf")
# We can use FeatureSpace.feature to create a custom feature
# that will use our preprocessing layer.
feature_space = FeatureSpace(
features={
"text": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
feature_space.adapt(tf.data.Dataset.from_tensor_slices(data))
output_vector = feature_space(data)
```
**Retrieving the underlying Keras preprocessing layers:**
```python
# The preprocessing layer of each feature is available in `.preprocessors`.
preprocessing_layer = feature_space.preprocessors["feature1"]
# The crossing layer of each feature cross is available in `.crossers`.
# It's an instance of keras.layers.HashedCrossing.
crossing_layer = feature_space.crossers["feature1_X_feature2"]
```
**Saving and reloading a FeatureSpace:**
```python
feature_space.save("featurespace.keras")
reloaded_feature_space = keras.models.load_model("featurespace.keras")
```
"""
    @classmethod
    def cross(cls, feature_names, crossing_dim, output_mode="one_hot"):
        """Build a `Cross` spec hashing the given features together."""
        return Cross(feature_names, crossing_dim, output_mode=output_mode)
    @classmethod
    def feature(cls, dtype, preprocessor, output_mode):
        """Build a custom `Feature` spec from an arbitrary preprocessor."""
        return Feature(dtype, preprocessor, output_mode)
    @classmethod
    def float(cls, name=None):
        """Spec for a plain float feature, passed through unchanged."""
        # Intentionally shadows the `float` builtin at class scope; this
        # name is part of the public FeatureSpace API.
        name = name or auto_name("float")
        preprocessor = TFDIdentity(dtype="float32", name=f"{name}_preprocessor")
        return Feature(
            dtype="float32", preprocessor=preprocessor, output_mode="float"
        )
    @classmethod
    def float_rescaled(cls, scale=1.0, offset=0.0, name=None):
        """Spec for a float feature linearly rescaled via `Rescaling`."""
        name = name or auto_name("float_rescaled")
        preprocessor = layers.Rescaling(
            scale=scale, offset=offset, name=f"{name}_preprocessor"
        )
        return Feature(
            dtype="float32", preprocessor=preprocessor, output_mode="float"
        )
    @classmethod
    def float_normalized(cls, name=None):
        """Spec for a float feature standardized via `Normalization`."""
        name = name or auto_name("float_normalized")
        preprocessor = layers.Normalization(
            axis=-1, name=f"{name}_preprocessor"
        )
        return Feature(
            dtype="float32", preprocessor=preprocessor, output_mode="float"
        )
    @classmethod
    def float_discretized(
        cls, num_bins, bin_boundaries=None, output_mode="one_hot", name=None
    ):
        """Spec for a float feature bucketized via `Discretization`."""
        name = name or auto_name("float_discretized")
        preprocessor = layers.Discretization(
            num_bins=num_bins,
            bin_boundaries=bin_boundaries,
            name=f"{name}_preprocessor",
        )
        return Feature(
            dtype="float32", preprocessor=preprocessor, output_mode=output_mode
        )
    @classmethod
    def integer_categorical(
        cls,
        max_tokens=None,
        num_oov_indices=1,
        output_mode="one_hot",
        name=None,
    ):
        """Spec for an integer feature indexed via `IntegerLookup`."""
        name = name or auto_name("integer_categorical")
        preprocessor = layers.IntegerLookup(
            name=f"{name}_preprocessor",
            max_tokens=max_tokens,
            num_oov_indices=num_oov_indices,
        )
        return Feature(
            dtype="int32", preprocessor=preprocessor, output_mode=output_mode
        )
    @classmethod
    def string_categorical(
        cls,
        max_tokens=None,
        num_oov_indices=1,
        output_mode="one_hot",
        name=None,
    ):
        """Spec for a string feature indexed via `StringLookup`."""
        name = name or auto_name("string_categorical")
        preprocessor = layers.StringLookup(
            name=f"{name}_preprocessor",
            max_tokens=max_tokens,
            num_oov_indices=num_oov_indices,
        )
        return Feature(
            dtype="string", preprocessor=preprocessor, output_mode=output_mode
        )
    @classmethod
    def string_hashed(cls, num_bins, output_mode="one_hot", name=None):
        """Spec for a string feature hashed into `num_bins` bins."""
        name = name or auto_name("string_hashed")
        preprocessor = layers.Hashing(
            name=f"{name}_preprocessor", num_bins=num_bins
        )
        return Feature(
            dtype="string", preprocessor=preprocessor, output_mode=output_mode
        )
    @classmethod
    def integer_hashed(cls, num_bins, output_mode="one_hot", name=None):
        """Spec for an integer feature hashed into `num_bins` bins."""
        name = name or auto_name("integer_hashed")
        preprocessor = layers.Hashing(
            name=f"{name}_preprocessor", num_bins=num_bins
        )
        return Feature(
            dtype="int32", preprocessor=preprocessor, output_mode=output_mode
        )
    def __init__(
        self,
        features,
        output_mode="concat",
        crosses=None,
        crossing_dim=32,
        hashing_dim=32,
        num_discretization_bins=32,
        name=None,
    ):
        """Validate arguments and build per-feature sublayers eagerly.

        Feature specs and crosses are normalized to `Feature`/`Cross`
        instances; preprocessors, symbolic inputs, and crossers are
        created immediately, but encoding sublayers are built lazily.
        """
        super().__init__(name=name)
        if not features:
            raise ValueError("The `features` argument cannot be None or empty.")
        self.crossing_dim = crossing_dim
        self.hashing_dim = hashing_dim
        self.num_discretization_bins = num_discretization_bins
        # Normalize every user-provided spec into a `Feature` instance.
        self.features = {
            name: self._standardize_feature(name, value)
            for name, value in features.items()
        }
        self.crosses = []
        if crosses:
            feature_set = set(features.keys())
            for cross in crosses:
                if isinstance(cross, dict):
                    cross = serialization_lib.deserialize_keras_object(cross)
                if isinstance(cross, Cross):
                    self.crosses.append(cross)
                else:
                    # A plain tuple/list of feature names: validate it and
                    # wrap it in a `Cross` with the default crossing_dim.
                    if not crossing_dim:
                        raise ValueError(
                            "When specifying `crosses`, the argument "
                            "`crossing_dim` "
                            "(dimensionality of the crossing space) "
                            "should be specified as well."
                        )
                    for key in cross:
                        if key not in feature_set:
                            raise ValueError(
                                "All features referenced "
                                "in the `crosses` argument "
                                "should be present in the `features` dict. "
                                f"Received unknown features: {cross}"
                            )
                    self.crosses.append(Cross(cross, crossing_dim=crossing_dim))
        self.crosses_by_name = {cross.name: cross for cross in self.crosses}
        if output_mode not in {"dict", "concat"}:
            raise ValueError(
                "Invalid value for argument `output_mode`. "
                "Expected one of {'dict', 'concat'}. "
                f"Received: output_mode={output_mode}"
            )
        self.output_mode = output_mode
        # Symbolic inputs and preprocessors, one per feature.
        self.inputs = {
            name: self._feature_to_input(name, value)
            for name, value in self.features.items()
        }
        self.preprocessors = {
            name: value.preprocessor for name, value in self.features.items()
        }
        self.encoded_features = None
        self.crossers = {
            cross.name: self._cross_to_crosser(cross) for cross in self.crosses
        }
        # One-hot encoders and the concat layer are created lazily in
        # `_merge_features` once output cardinalities are known.
        self.one_hot_encoders = {}
        self._is_adapted = False
        self.concat = None
        self._preprocessed_features_names = None
        self._crossed_features_names = None
        self._sublayers_built = False
    def _feature_to_input(self, name, feature):
        # Each feature is a scalar per sample, hence shape (1,).
        return layers.Input(shape=(1,), dtype=feature.dtype, name=name)
def _standardize_feature(self, name, feature):
if isinstance(feature, Feature):
return feature
if isinstance(feature, dict):
return serialization_lib.deserialize_keras_object(feature)
if feature == "float":
return self.float(name=name)
elif feature == "float_normalized":
return self.float_normalized(name=name)
elif feature == "float_rescaled":
return self.float_rescaled(name=name)
elif feature == "float_discretized":
return self.float_discretized(
name=name, num_bins=self.num_discretization_bins
)
elif feature == "integer_categorical":
return self.integer_categorical(name=name)
elif feature == "string_categorical":
return self.string_categorical(name=name)
elif feature == "integer_hashed":
return self.integer_hashed(self.hashing_dim, name=name)
elif feature == "string_hashed":
return self.string_hashed(self.hashing_dim, name=name)
else:
raise ValueError(f"Invalid feature type: {feature}")
    def _cross_to_crosser(self, cross):
        # One HashedCrossing layer per cross, named after the cross itself.
        return layers.HashedCrossing(cross.crossing_dim, name=cross.name)
    def _list_adaptable_preprocessors(self):
        """Return names of features whose preprocessor still needs `adapt()`."""
        adaptable_preprocessors = []
        for name in self.features.keys():
            preprocessor = self.preprocessors[name]
            # Special case: a Normalization layer with preset mean/variance.
            # Not adaptable.
            if isinstance(preprocessor, layers.Normalization):
                if preprocessor.input_mean is not None:
                    continue
            # Special case: a TextVectorization layer with provided vocabulary.
            elif isinstance(preprocessor, layers.TextVectorization):
                if preprocessor._has_input_vocabulary:
                    continue
            # Anything exposing `adapt` is considered adaptable.
            if hasattr(preprocessor, "adapt"):
                adaptable_preprocessors.append(name)
        return adaptable_preprocessors
    def adapt(self, dataset):
        """Adapt all adaptable preprocessors on a `tf.data.Dataset` of dicts.

        Each adaptable preprocessor is adapted on its own feature column,
        extracted from the dataset. After adaptation, the encoding
        sublayers are built so the FeatureSpace is ready to be called.
        """
        if not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "`adapt()` can only be called on a tf.data.Dataset. "
                f"Received instead: {dataset} (of type {type(dataset)})"
            )
        for name in self._list_adaptable_preprocessors():
            # Call adapt() on each individual adaptable layer.
            # TODO: consider rewriting this to instead iterate on the
            # dataset once, split each batch into individual features,
            # and call the layer's `_adapt_function` on each batch
            # to simulate the behavior of adapt() in a more performant fashion.
            # NOTE: the lambda's late binding of `name` is safe here because
            # the mapped dataset is fully consumed within this iteration.
            feature_dataset = dataset.map(lambda x: x[name])
            preprocessor = self.preprocessors[name]
            # TODO: consider adding an adapt progress bar.
            # Sample 1 element to check the rank
            x = next(iter(feature_dataset))
            if len(x.shape) == 0:
                # The dataset yields unbatched scalars; batch it.
                feature_dataset = feature_dataset.batch(32)
            if len(x.shape) in {0, 1}:
                # If the rank is 1, add a dimension
                # so we can reduce on axis=-1.
                # Note: if rank was previously 0, it is now 1.
                feature_dataset = feature_dataset.map(
                    lambda x: tf.expand_dims(x, -1)
                )
            preprocessor.adapt(feature_dataset)
        self._is_adapted = True
        self.get_encoded_features()  # Finish building the layer
        self.built = True
        self._sublayers_built = True
    def get_inputs(self):
        """Return the dict of symbolic `keras.Input`s, one per feature."""
        self._check_if_built()
        return self.inputs
    def get_encoded_features(self):
        """Return the encoded Keras tensors, building sublayers on demand."""
        self._check_if_adapted()
        # Built lazily and cached on first access.
        if self.encoded_features is None:
            preprocessed_features = self._preprocess_features(self.inputs)
            crossed_features = self._cross_features(preprocessed_features)
            merged_features = self._merge_features(
                preprocessed_features, crossed_features
            )
            self.encoded_features = merged_features
        return self.encoded_features
def _preprocess_features(self, features):
return {
name: self.preprocessors[name](features[name])
for name in features.keys()
}
def _cross_features(self, features):
all_outputs = {}
for cross in self.crosses:
inputs = [features[name] for name in cross.feature_names]
outputs = self.crossers[cross.name](inputs)
all_outputs[cross.name] = outputs
return all_outputs
    def _merge_features(self, preprocessed_features, crossed_features):
        """Encode and combine per-feature and crossed outputs.

        On first call this also creates the one-hot encoders and (in
        concat mode) the concat layer; subsequent calls take the fast
        path and reuse them. Feature order is fixed by sorted names so
        the concatenated layout is stable across calls.
        """
        if not self._preprocessed_features_names:
            self._preprocessed_features_names = sorted(
                preprocessed_features.keys()
            )
            self._crossed_features_names = sorted(crossed_features.keys())
        all_names = (
            self._preprocessed_features_names + self._crossed_features_names
        )
        all_features = [
            preprocessed_features[name]
            for name in self._preprocessed_features_names
        ] + [crossed_features[name] for name in self._crossed_features_names]
        if self.output_mode == "dict":
            output_dict = {}
        else:
            features_to_concat = []
        if self._sublayers_built:
            # Fast mode.
            for name, feature in zip(all_names, all_features):
                encoder = self.one_hot_encoders.get(name, None)
                if encoder:
                    feature = encoder(feature)
                if self.output_mode == "dict":
                    output_dict[name] = feature
                else:
                    features_to_concat.append(feature)
            if self.output_mode == "dict":
                return output_dict
            else:
                return self.concat(features_to_concat)
        # If the object isn't built,
        # we create the encoder and concat layers below
        all_specs = [
            self.features[name] for name in self._preprocessed_features_names
        ] + [
            self.crosses_by_name[name] for name in self._crossed_features_names
        ]
        for name, feature, spec in zip(all_names, all_features, all_specs):
            if tree.is_nested(feature):
                dtype = tree.flatten(feature)[0].dtype
            else:
                dtype = feature.dtype
            dtype = backend.standardize_dtype(dtype)
            if spec.output_mode == "one_hot":
                preprocessor = self.preprocessors.get(
                    name
                ) or self.crossers.get(name)
                cardinality = None
                if not dtype.startswith("int"):
                    raise ValueError(
                        f"Feature '{name}' has `output_mode='one_hot'`. "
                        "Thus its preprocessor should return an integer dtype. "
                        f"Instead it returns a {dtype} dtype."
                    )
                # Determine the one-hot width from the preprocessor type.
                if isinstance(
                    preprocessor, (layers.IntegerLookup, layers.StringLookup)
                ):
                    cardinality = preprocessor.vocabulary_size()
                elif isinstance(preprocessor, layers.CategoryEncoding):
                    cardinality = preprocessor.num_tokens
                elif isinstance(preprocessor, layers.Discretization):
                    cardinality = preprocessor.num_bins
                elif isinstance(
                    preprocessor, (layers.HashedCrossing, layers.Hashing)
                ):
                    cardinality = preprocessor.num_bins
                else:
                    raise ValueError(
                        f"Feature '{name}' has `output_mode='one_hot'`. "
                        "However it isn't a standard feature and the "
                        "dimensionality of its output space is not known, "
                        "thus it cannot be one-hot encoded. "
                        "Try using `output_mode='int'`."
                    )
                if cardinality is not None:
                    encoder = layers.CategoryEncoding(
                        num_tokens=cardinality, output_mode="multi_hot"
                    )
                    self.one_hot_encoders[name] = encoder
                    feature = encoder(feature)
            if self.output_mode == "concat":
                dtype = feature.dtype
                # Only float-encoded features can be concatenated.
                if dtype.startswith("int") or dtype == "string":
                    raise ValueError(
                        f"Cannot concatenate features because feature '{name}' "
                        f"has not been encoded (it has dtype {dtype}). "
                        "Consider using `output_mode='dict'`."
                    )
                features_to_concat.append(feature)
            else:
                output_dict[name] = feature
        if self.output_mode == "concat":
            self.concat = TFDConcat(axis=-1)
            return self.concat(features_to_concat)
        else:
            return output_dict
    def _check_if_adapted(self):
        # A FeatureSpace with no adaptable preprocessors never needs adapt().
        if not self._is_adapted:
            if not self._list_adaptable_preprocessors():
                self._is_adapted = True
            else:
                raise ValueError(
                    "You need to call `.adapt(dataset)` on the FeatureSpace "
                    "before you can start using it."
                )
    def _check_if_built(self):
        # Lazily finish construction the first time the object is used.
        if not self._sublayers_built:
            self._check_if_adapted()
            # Finishes building
            self.get_encoded_features()
            self._sublayers_built = True
    def _convert_input(self, x):
        # Normalize any input to a TF tensor. Backend-native tensors go
        # through numpy first; Python scalars/lists convert directly.
        if not isinstance(x, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
            if not isinstance(x, (list, tuple, int, float)):
                x = backend.convert_to_numpy(x)
            x = tf.convert_to_tensor(x)
        return x
    def __call__(self, data):
        """Preprocess, cross, and merge a dict of raw feature data.

        Unbatched inputs (scalars or rank-1 values) are temporarily
        reshaped to a batch of one and squeezed back before returning.
        Outputs are converted back to backend-native tensors when not
        running inside a TF graph on a non-TF backend.
        """
        self._check_if_built()
        if not isinstance(data, dict):
            raise ValueError(
                "A FeatureSpace can only be called with a dict. "
                f"Received: data={data} (of type {type(data)}"
            )
        # Many preprocessing layers support all backends but many do not.
        # Switch to TF to make FeatureSpace work universally.
        data = {key: self._convert_input(value) for key, value in data.items()}
        rebatched = False
        for name, x in data.items():
            if len(x.shape) == 0:
                data[name] = tf.reshape(x, (1, 1))
                rebatched = True
            elif len(x.shape) == 1:
                data[name] = tf.expand_dims(x, -1)
        with backend_utils.TFGraphScope():
            # This scope is to make sure that inner DataLayers
            # will not convert outputs back to backend-native --
            # they should be TF tensors throughout
            preprocessed_data = self._preprocess_features(data)
            preprocessed_data = tree.map_structure(
                lambda x: self._convert_input(x), preprocessed_data
            )
            crossed_data = self._cross_features(preprocessed_data)
            crossed_data = tree.map_structure(
                lambda x: self._convert_input(x), crossed_data
            )
            merged_data = self._merge_features(preprocessed_data, crossed_data)
        if rebatched:
            if self.output_mode == "concat":
                assert merged_data.shape[0] == 1
                if (
                    backend.backend() != "tensorflow"
                    and not backend_utils.in_tf_graph()
                ):
                    merged_data = backend.convert_to_numpy(merged_data)
                merged_data = tf.squeeze(merged_data, axis=0)
            else:
                # Squeeze the synthetic batch dim off each dict entry.
                for name, x in merged_data.items():
                    if len(x.shape) == 2 and x.shape[0] == 1:
                        merged_data[name] = tf.squeeze(x, axis=0)
        if (
            backend.backend() != "tensorflow"
            and not backend_utils.in_tf_graph()
        ):
            merged_data = tree.map_structure(
                lambda x: backend.convert_to_tensor(x, dtype=x.dtype),
                merged_data,
            )
        return merged_data
    def get_config(self):
        """Return the serializable config of this FeatureSpace."""
        return {
            "features": serialization_lib.serialize_keras_object(self.features),
            "output_mode": self.output_mode,
            "crosses": serialization_lib.serialize_keras_object(self.crosses),
            "crossing_dim": self.crossing_dim,
            "hashing_dim": self.hashing_dim,
            "num_discretization_bins": self.num_discretization_bins,
        }
    @classmethod
    def from_config(cls, config):
        # Serialized Feature/Cross dicts are deserialized inside `__init__`.
        return cls(**config)
    def get_build_config(self):
        # Record each preprocessor's build config so the FeatureSpace can
        # be restored without re-adapting.
        return {
            name: feature.preprocessor.get_build_config()
            for name, feature in self.features.items()
        }
    def build_from_config(self, config):
        for name in config.keys():
            preprocessor = self.features[name].preprocessor
            if not preprocessor.built:
                preprocessor.build_from_config(config[name])
        # Restored preprocessors are treated as already adapted.
        self._is_adapted = True
    def save(self, filepath):
        """Save the `FeatureSpace` instance to a `.keras` file.
        You can reload it via `keras.models.load_model()`:
        ```python
        feature_space.save("featurespace.keras")
        reloaded_fs = keras.models.load_model("featurespace.keras")
        ```
        """
        saving_lib.save_model(self, filepath)
    def save_own_variables(self, store):
        # Variables are saved via the per-feature sublayers, not via this
        # container layer, so both hooks are deliberate no-ops.
        return
    def load_own_variables(self, store):
        return
class TFDConcat(DataLayer):
    """Concatenation layer routed through the DataLayer backend ops."""
    def __init__(self, axis, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
    def call(self, xs):
        return self.backend.numpy.concatenate(xs, axis=self.axis)
class TFDIdentity(DataLayer):
    """Pass-through layer used as the preprocessor for plain float features."""
    def call(self, x):
        return x
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/category_encoding_test.py | keras/src/layers/preprocessing/category_encoding_test.py | import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
# Parameterization used by the tests below: every case runs in dense mode,
# and a sparse variant is added only when the active backend supports
# sparse tensors.
TEST_CASES = [{"testcase_name": "dense", "sparse": False}]
if backend.SUPPORTS_SPARSE_TENSORS:
    TEST_CASES += [{"testcase_name": "sparse", "sparse": True}]
class CategoryEncodingTest(testing.TestCase):
    """Tests for `layers.CategoryEncoding`.

    Each output mode (`"count"`, `"multi_hot"`, `"one_hot"`) is exercised
    with single-sample and batched inputs, on eager, symbolic, and tf.data
    call paths. `TEST_CASES` parameterizes most tests over dense and (when
    the backend supports it) sparse outputs.
    """

    @parameterized.named_parameters(TEST_CASES)
    def test_count_output(self, sparse):
        input_array = np.array([1, 2, 3, 1])
        expected_output = np.array([0, 2, 1, 1, 0, 0])
        num_tokens = 6
        expected_output_shape = (num_tokens,)
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="count", sparse=sparse
        )
        int_data = layer(input_array)
        self.assertEqual(expected_output_shape, int_data.shape)
        self.assertAllClose(int_data, expected_output)
        self.assertSparse(int_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_array.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)

    @parameterized.named_parameters(TEST_CASES)
    def test_count_weighted_output(self, sparse):
        # In "count" mode with `count_weights`, per-token weights are
        # summed instead of counting raw occurrences.
        input_array = np.array([[0, 1], [0, 0], [1, 2], [3, 1]])
        count_weights = np.array(
            [[0.1, 0.2], [0.1, 0.1], [0.2, 0.3], [0.4, 0.2]]
        )
        expected_output = np.array(
            [
                [0.1, 0.2, 0.0, 0.0, 0.0, 0.0],
                [0.2, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.2, 0.3, 0.0, 0.0, 0.0],
                [0.0, 0.2, 0.0, 0.4, 0.0, 0.0],
            ]
        )
        num_tokens = 6
        expected_output_shape = (input_array.shape[0], num_tokens)
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="count", sparse=sparse
        )
        int_data = layer(input_array, count_weights=count_weights)
        self.assertEqual(expected_output_shape, int_data.shape)
        self.assertAllClose(int_data, expected_output)
        self.assertSparse(int_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_array.shape, dtype="int32"),
            count_weights=layers.Input(
                batch_shape=input_array.shape, dtype="float32"
            ),
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)

    @parameterized.named_parameters(TEST_CASES)
    def test_batched_count_output(self, sparse):
        input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
        expected_output = np.array([[0, 2, 1, 1, 0, 0], [2, 1, 0, 1, 0, 0]])
        num_tokens = 6
        expected_output_shape = (2, num_tokens)
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="count", sparse=sparse
        )
        int_data = layer(input_array)
        self.assertEqual(expected_output_shape, int_data.shape)
        self.assertAllClose(int_data, expected_output)
        self.assertSparse(int_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_array.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)

    @parameterized.named_parameters(TEST_CASES)
    def test_multi_hot(self, sparse):
        input_data = np.array([3, 2, 0, 1])
        expected_output = np.array([1, 1, 1, 1, 0, 0])
        num_tokens = 6
        expected_output_shape = (num_tokens,)
        # Test call on layer directly.
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
        )
        output_data = layer(input_data)
        self.assertAllClose(expected_output, output_data)
        self.assertEqual(expected_output_shape, output_data.shape)
        self.assertSparse(output_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_data.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)

    @parameterized.named_parameters(TEST_CASES)
    def test_batched_multi_hot(self, sparse):
        input_data = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
        expected_output = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 0]])
        num_tokens = 6
        expected_output_shape = (input_data.shape[0], num_tokens)
        # Test call on layer directly.
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
        )
        output_data = layer(input_data)
        self.assertAllClose(expected_output, output_data)
        self.assertEqual(expected_output_shape, output_data.shape)
        self.assertSparse(output_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_data.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)
        # Test compute_output_shape
        # NOTE(review): `np.array((4))` is a 0-d scalar array, unlike
        # `np.array((4,))` below in test_one_hot — confirm the scalar
        # input is intentional here.
        input_data = np.array((4))
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
        )
        self.assertEqual(
            layer(input_data).shape,
            layer.compute_output_shape(input_data.shape),
        )

    @parameterized.named_parameters(TEST_CASES)
    def test_one_hot(self, sparse):
        input_data = np.array([3, 2, 0, 1])
        expected_output = np.array(
            [
                [0, 0, 0, 1, 0, 0],
                [0, 0, 1, 0, 0, 0],
                [1, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0],
            ]
        )
        num_tokens = 6
        expected_output_shape = (input_data.shape[0], num_tokens)
        # Test call on layer directly.
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
        )
        output_data = layer(input_data)
        self.assertAllClose(expected_output, output_data)
        self.assertEqual(expected_output_shape, output_data.shape)
        self.assertSparse(output_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_data.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)
        # Test compute_output_shape
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
        )
        self.assertEqual(
            layer(input_data).shape,
            layer.compute_output_shape(input_data.shape),
        )
        # Test compute_output_shape with 1 extra dimension
        input_data = np.array([[3], [2], [0], [1]])
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
        )
        self.assertEqual(
            layer(input_data).shape,
            layer.compute_output_shape(input_data.shape),
        )
        input_data = np.array((4,))
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
        )
        self.assertEqual(
            layer(input_data).shape,
            layer.compute_output_shape(input_data.shape),
        )

    @parameterized.named_parameters(TEST_CASES)
    def test_batched_one_hot(self, sparse):
        input_data = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
        expected_output = np.array(
            [
                [
                    [0, 0, 0, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0],
                    [1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0],
                ],
                [
                    [0, 0, 0, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0],
                    [1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0],
                ],
            ]
        )
        num_tokens = 6
        expected_output_shape = input_data.shape[0:2] + (num_tokens,)
        # Test call on layer directly.
        layer = layers.CategoryEncoding(
            num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
        )
        output_data = layer(input_data)
        self.assertAllClose(expected_output, output_data)
        self.assertEqual(expected_output_shape, output_data.shape)
        self.assertSparse(output_data, sparse)
        # Test symbolic call.
        output = layer(
            layers.Input(batch_shape=input_data.shape, dtype="int32")
        )
        self.assertEqual(expected_output_shape, output.shape)
        self.assertEqual("float32", output.dtype)
        self.assertSparse(output, sparse)

    def test_tf_data_compatibility(self):
        """The layer must be mappable over a `tf.data` pipeline."""
        layer = layers.CategoryEncoding(
            num_tokens=4, output_mode="one_hot", dtype="int32"
        )
        input_data = np.array([3, 2, 0, 1])
        expected_output = np.array(
            [
                [0, 0, 0, 1],
                [0, 0, 1, 0],
                [1, 0, 0, 0],
                [0, 1, 0, 0],
            ]
        )
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
        for output in ds.take(1):
            output = output.numpy()
        self.assertAllClose(output, expected_output)

    def test_category_encoding_without_num_tokens(self):
        with self.assertRaisesRegex(
            ValueError, r"num_tokens must be set to use this layer"
        ):
            layers.CategoryEncoding(output_mode="multi_hot")

    def test_category_encoding_with_invalid_num_tokens(self):
        with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
            layers.CategoryEncoding(num_tokens=0, output_mode="multi_hot")
        with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
            layers.CategoryEncoding(num_tokens=-1, output_mode="multi_hot")

    def test_category_encoding_with_unnecessary_count_weights(self):
        # `count_weights` is rejected outside "count" mode.
        layer = layers.CategoryEncoding(num_tokens=4, output_mode="multi_hot")
        input_data = np.array([0, 1, 2, 3])
        count_weights = np.array([0.1, 0.2, 0.3, 0.4])
        with self.assertRaisesRegex(
            ValueError, r"`count_weights` is not used when `output_mode`"
        ):
            layer(input_data, count_weights=count_weights)

    def test_invalid_output_mode_raises_error(self):
        with self.assertRaisesRegex(
            ValueError, r"Unknown arg for output_mode: invalid_mode"
        ):
            layers.CategoryEncoding(num_tokens=4, output_mode="invalid_mode")

    def test_encode_one_hot_single_sample(self):
        # Exercises the private `_encode` helper directly.
        layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
        input_array = np.array([1, 2, 3, 1])
        expected_output = np.array(
            [
                [0, 1, 0, 0],
                [0, 0, 1, 0],
                [0, 0, 0, 1],
                [0, 1, 0, 0],
            ]
        )
        output = layer._encode(input_array)
        self.assertAllClose(expected_output, output)

    def test_encode_one_hot_batched_samples(self):
        layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
        input_array = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
        expected_output = np.array(
            [
                [[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
                [[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
            ]
        )
        output = layer._encode(input_array)
        self.assertAllClose(expected_output, output)

    def test_count_single_sample(self):
        layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
        input_array = np.array([1, 2, 3, 1])
        expected_output = np.array([0, 2, 1, 1])
        output = layer(input_array)
        self.assertAllClose(expected_output, output)

    def test_count_batched_samples(self):
        layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
        input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
        expected_output = np.array([[0, 2, 1, 1], [2, 1, 0, 1]])
        output = layer(input_array)
        self.assertAllClose(expected_output, output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/mel_spectrogram.py | keras/src/layers/preprocessing/mel_spectrogram.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.data_layer import DataLayer
# Mel-spectrum constants (HTK convention), used by `_hertz_to_mel`:
# mel(f) = _MEL_HIGH_FREQUENCY_Q * ln(1 + f / _MEL_BREAK_FREQUENCY_HERTZ)
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
@keras_export("keras.layers.MelSpectrogram")
class MelSpectrogram(DataLayer):
    """A preprocessing layer to convert raw audio signals to Mel spectrograms.

    This layer takes `float32`/`float64` single or batched audio signal as
    inputs and computes the Mel spectrogram using Short-Time Fourier Transform
    and Mel scaling. The input should be a 1D (unbatched) or 2D (batched)
    tensor representing audio signals. The output will be a 2D or 3D tensor
    representing Mel spectrograms.

    A spectrogram is an image-like representation that shows the frequency
    spectrum of a signal over time. It uses x-axis to represent time, y-axis
    to represent frequency, and each pixel to represent intensity.

    Mel spectrograms are a special type of spectrogram that use the mel scale,
    which approximates how humans perceive sound. They are commonly used in
    speech and music processing tasks like speech recognition, speaker
    identification, and music genre classification.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    References:
    - [Spectrogram](https://en.wikipedia.org/wiki/Spectrogram),
    - [Mel scale](https://en.wikipedia.org/wiki/Mel_scale).

    Args:
        fft_length: Integer, size of the FFT window.
        sequence_stride: Integer, number of samples between successive STFT
            columns.
        sequence_length: Integer, size of the window used for applying
            `window` to each audio frame. If `None`, defaults to `fft_length`.
        window: String, name of the window function to use. Available values
            are `"hann"` and `"hamming"`. If `window` is a tensor, it will be
            used directly as the window and its length must be
            `sequence_length`. If `window` is `None`, no windowing is
            used. Defaults to `"hann"`.
        sampling_rate: Integer, sample rate of the input signal.
        num_mel_bins: Integer, number of mel bins to generate.
        min_freq: Float, minimum frequency of the mel bins.
        max_freq: Float, maximum frequency of the mel bins.
            If `None`, defaults to `sampling_rate / 2`.
        power_to_db: If True, convert the power spectrogram to decibels.
        top_db: Float, minimum negative cut-off `max(10 * log10(S)) - top_db`.
        mag_exp: Float, exponent for the magnitude spectrogram.
            1 for magnitude, 2 for power, etc. Default is 2.
        min_power: Float, minimum value for power and `ref_power`.
        ref_power: Float, the power is scaled relative to it
            `10 * log10(S / ref_power)`.

    Examples:

    **Unbatched audio signal**

    >>> layer = keras.layers.MelSpectrogram(num_mel_bins=64,
    ...                                     sampling_rate=8000,
    ...                                     sequence_stride=256,
    ...                                     fft_length=2048)
    >>> layer(keras.random.uniform(shape=(16000,))).shape
    (64, 63)

    **Batched audio signal**

    >>> layer = keras.layers.MelSpectrogram(num_mel_bins=80,
    ...                                     sampling_rate=8000,
    ...                                     sequence_stride=128,
    ...                                     fft_length=2048)
    >>> layer(keras.random.uniform(shape=(2, 16000))).shape
    (2, 80, 125)

    Input shape:
        1D (unbatched) or 2D (batched) tensor with shape:`(..., samples)`.

    Output shape:
        2D (unbatched) or 3D (batched) tensor with
        shape:`(..., num_mel_bins, time)`.
    """

    def __init__(
        self,
        fft_length=2048,
        sequence_stride=512,
        sequence_length=None,
        window="hann",
        sampling_rate=16000,
        num_mel_bins=128,
        min_freq=20.0,
        max_freq=None,
        power_to_db=True,
        top_db=80.0,
        mag_exp=2.0,
        min_power=1e-10,
        ref_power=1.0,
        **kwargs,
    ):
        self.fft_length = fft_length
        self.sequence_stride = sequence_stride
        # Window length defaults to the FFT length.
        self.sequence_length = sequence_length or fft_length
        self.window = window
        self.sampling_rate = sampling_rate
        self.num_mel_bins = num_mel_bins
        self.min_freq = min_freq
        # Default the upper mel-bin edge to the Nyquist frequency.
        self.max_freq = max_freq or int(sampling_rate / 2)
        self.power_to_db = power_to_db
        self.top_db = top_db
        self.mag_exp = mag_exp
        self.min_power = min_power
        self.ref_power = ref_power
        super().__init__(**kwargs)

    def call(self, inputs):
        """Computes the mel spectrogram, shaped `(..., num_mel_bins, time)`."""
        dtype = (
            "float32"
            if self.compute_dtype not in ["float32", "float64"]
            else self.compute_dtype
        )  # jax, tf supports only "float32" and "float64" in stft
        inputs = self.backend.convert_to_tensor(inputs, dtype=dtype)
        outputs = self._spectrogram(inputs)
        outputs = self._melscale(outputs)
        if self.power_to_db:
            outputs = self._dbscale(outputs)
        # swap time & freq axis to have shape of (..., num_mel_bins, time)
        outputs = self.backend.numpy.swapaxes(outputs, -1, -2)
        outputs = self.backend.cast(outputs, self.compute_dtype)
        return outputs

    def _spectrogram(self, inputs):
        """STFT magnitude raised to `mag_exp` (2 => power spectrogram)."""
        real, imag = self.backend.math.stft(
            inputs,
            sequence_length=self.sequence_length,
            sequence_stride=self.sequence_stride,
            fft_length=self.fft_length,
            window=self.window,
            center=True,
        )
        # abs of complex = sqrt(real^2 + imag^2)
        spec = self.backend.numpy.sqrt(
            self.backend.numpy.add(
                self.backend.numpy.square(real), self.backend.numpy.square(imag)
            )
        )
        spec = self.backend.numpy.power(spec, self.mag_exp)
        return spec

    def _melscale(self, inputs):
        """Projects linear-frequency bins onto `num_mel_bins` mel bins."""
        matrix = self.linear_to_mel_weight_matrix(
            num_mel_bins=self.num_mel_bins,
            num_spectrogram_bins=self.backend.shape(inputs)[-1],
            sampling_rate=self.sampling_rate,
            lower_edge_hertz=self.min_freq,
            upper_edge_hertz=self.max_freq,
        )
        return self.backend.numpy.tensordot(inputs, matrix, axes=1)

    def _dbscale(self, inputs):
        """Converts power values to decibels, clipped `top_db` below peak."""
        log_spec = 10.0 * (
            self.backend.numpy.log10(
                self.backend.numpy.maximum(inputs, self.min_power)
            )
        )
        ref_value = self.backend.numpy.abs(
            self.backend.convert_to_tensor(self.ref_power)
        )
        # Scale relative to ref_power: 10 * log10(S / ref_power).
        log_spec -= 10.0 * self.backend.numpy.log10(
            self.backend.numpy.maximum(ref_value, self.min_power)
        )
        # Clip everything more than `top_db` below the peak.
        log_spec = self.backend.numpy.maximum(
            log_spec, self.backend.numpy.max(log_spec) - self.top_db
        )
        return log_spec

    def _hertz_to_mel(self, frequencies_hertz):
        """Converts frequencies in `frequencies_hertz` in Hertz to the
        mel scale.

        Args:
            frequencies_hertz: A tensor of frequencies in Hertz.

        Returns:
            A tensor of the same shape and type of `frequencies_hertz`
            containing frequencies in the mel scale.
        """
        return _MEL_HIGH_FREQUENCY_Q * self.backend.numpy.log(
            1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)
        )

    def linear_to_mel_weight_matrix(
        self,
        num_mel_bins=20,
        num_spectrogram_bins=129,
        sampling_rate=8000,
        lower_edge_hertz=125.0,
        upper_edge_hertz=3800.0,
        dtype="float32",
    ):
        """Returns a matrix to warp linear scale spectrograms to the mel scale.

        Returns a weight matrix that can be used to re-weight a tensor
        containing `num_spectrogram_bins` linearly sampled frequency
        information from `[0, sampling_rate / 2]` into `num_mel_bins`
        frequency information from `[lower_edge_hertz, upper_edge_hertz]`
        on the mel scale.

        This function follows the [Hidden Markov Model Toolkit (HTK)](
        http://htk.eng.cam.ac.uk/) convention, defining the mel scale in
        terms of a frequency in hertz according to the following formula:

        ```mel(f) = 2595 * log10( 1 + f/700)```

        In the returned matrix, all the triangles (filterbanks) have a peak
        value of 1.0.

        For example, the returned matrix `A` can be used to right-multiply a
        spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
        scale spectrum values (e.g. STFT magnitudes) to generate a
        "mel spectrogram" `M` of shape `[frames, num_mel_bins]`.

        ```
        # `S` has shape [frames, num_spectrogram_bins]
        # `M` has shape [frames, num_mel_bins]
        M = keras.ops.matmul(S, A)
        ```

        The matrix can be used with `keras.ops.tensordot` to convert an
        arbitrary rank `Tensor` of linear-scale spectral bins into the
        mel scale.

        ```
        # S has shape [..., num_spectrogram_bins].
        # M has shape [..., num_mel_bins].
        M = keras.ops.tensordot(S, A, 1)
        ```

        References:
        - [Mel scale (Wikipedia)](https://en.wikipedia.org/wiki/Mel_scale)

        Args:
            num_mel_bins: Python int. How many bands in the resulting
                mel spectrum.
            num_spectrogram_bins: An integer `Tensor`. How many bins there are
                in the source spectrogram data, which is understood to be
                `fft_size // 2 + 1`, i.e. the spectrogram only contains the
                nonredundant FFT bins.
            sampling_rate: An integer or float `Tensor`. Samples per second of
                the input signal used to create the spectrogram. Used to
                figure out the frequencies corresponding to each spectrogram
                bin, which dictates how they are mapped into the mel scale.
            lower_edge_hertz: Python float. Lower bound on the frequencies to
                be included in the mel spectrum. This corresponds to the lower
                edge of the lowest triangular band.
            upper_edge_hertz: Python float. The desired top edge of the
                highest frequency band.
            dtype: The `DType` of the result matrix. Must be a floating point
                type.

        Returns:
            A tensor of shape `[num_spectrogram_bins, num_mel_bins]`.
        """
        # This function can be constant folded by graph optimization since
        # there are no Tensor inputs.
        sampling_rate = self.backend.cast(sampling_rate, dtype)
        lower_edge_hertz = self.backend.convert_to_tensor(
            lower_edge_hertz,
            dtype,
        )
        upper_edge_hertz = self.backend.convert_to_tensor(
            upper_edge_hertz,
            dtype,
        )
        zero = self.backend.convert_to_tensor(0.0, dtype)
        # HTK excludes the spectrogram DC bin.
        bands_to_zero = 1
        nyquist_hertz = sampling_rate / 2.0
        linear_frequencies = self.backend.numpy.linspace(
            zero, nyquist_hertz, num_spectrogram_bins
        )[bands_to_zero:]
        spectrogram_bins_mel = self.backend.numpy.expand_dims(
            self._hertz_to_mel(linear_frequencies), 1
        )
        # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
        # center of each band is the lower and upper edge of the adjacent
        # bands. Accordingly, we divide [lower_edge_hertz, upper_edge_hertz]
        # into num_mel_bins + 2 pieces.
        band_edges_mel = self.backend.math.extract_sequences(
            self.backend.numpy.linspace(
                self._hertz_to_mel(lower_edge_hertz),
                self._hertz_to_mel(upper_edge_hertz),
                num_mel_bins + 2,
            ),
            sequence_length=3,
            sequence_stride=1,
        )
        # Split the triples up and reshape them into [1, num_mel_bins] tensors.
        lower_edge_mel, center_mel, upper_edge_mel = tuple(
            self.backend.numpy.reshape(t, [1, num_mel_bins])
            for t in self.backend.numpy.split(band_edges_mel, 3, axis=1)
        )
        # Calculate lower and upper slopes for every spectrogram bin.
        # Line segments are linear in the mel domain, not Hertz.
        lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
            center_mel - lower_edge_mel
        )
        upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
            upper_edge_mel - center_mel
        )
        # Intersect the line segments with each other and zero.
        mel_weights_matrix = self.backend.numpy.maximum(
            zero, self.backend.numpy.minimum(lower_slopes, upper_slopes)
        )
        # Re-add the zeroed lower bins we sliced out above.
        return self.backend.numpy.pad(
            mel_weights_matrix,
            [[bands_to_zero, 0], [0, 0]],
        )

    def compute_output_shape(self, input_shape):
        """Output shape is `(..., num_mel_bins, time)` for 1D/2D inputs."""
        # NOTE(review): the frame count is computed as
        # (num_samples + sequence_stride + 1) // sequence_stride, which can
        # differ by one frame from the actual STFT output for some input
        # lengths — confirm against `backend.math.stft` with center=True.
        if len(input_shape) == 1:
            output_shape = [
                self.num_mel_bins,
                (
                    (input_shape[0] + self.sequence_stride + 1)
                    // self.sequence_stride
                    if input_shape[0] is not None
                    else None
                ),
            ]
        else:
            output_shape = [
                input_shape[0],
                self.num_mel_bins,
                (
                    (input_shape[1] + self.sequence_stride + 1)
                    // self.sequence_stride
                    if input_shape[1] is not None
                    else None
                ),
            ]
        return output_shape

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "fft_length": self.fft_length,
                "sequence_stride": self.sequence_stride,
                "sequence_length": self.sequence_length,
                "window": self.window,
                "sampling_rate": self.sampling_rate,
                "num_mel_bins": self.num_mel_bins,
                "min_freq": self.min_freq,
                "max_freq": self.max_freq,
                "power_to_db": self.power_to_db,
                "top_db": self.top_db,
                "mag_exp": self.mag_exp,
                "min_power": self.min_power,
                "ref_power": self.ref_power,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/rescaling_test.py | keras/src/layers/preprocessing/rescaling_test.py | import grain
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RescalingTest(testing.TestCase):
    """Tests for `layers.Rescaling` (`output = input * scale + offset`)."""

    @pytest.mark.requires_trainable_backend
    def test_rescaling_basics(self):
        self.run_layer_test(
            layers.Rescaling,
            init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )

    @pytest.mark.requires_trainable_backend
    def test_rescaling_dtypes(self):
        """Integer scale/offset/inputs must all be handled."""
        # int scale
        self.run_layer_test(
            layers.Rescaling,
            init_kwargs={"scale": 2, "offset": 0.5},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        # int offset
        self.run_layer_test(
            layers.Rescaling,
            init_kwargs={"scale": 1.0, "offset": 2},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        # int inputs
        self.run_layer_test(
            layers.Rescaling,
            init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
            input_shape=(2, 3),
            input_dtype="int16",
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )

    def test_rescaling_correctness(self):
        layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
        x = np.random.random((3, 10, 10, 3)) * 255
        out = layer(x)
        self.assertAllClose(out, x / 255 + 0.5)

    def test_tf_data_compatibility(self):
        layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
        x = np.random.random((3, 10, 10, 3)) * 255
        ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
        next(iter(ds)).numpy()

    def test_grain_compatibility(self):
        layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
        x = np.random.random((3, 10, 10, 3)) * 255
        ds = grain.MapDataset.source(x).to_iter_dataset().batch(3).map(layer)
        output = next(iter(ds))
        self.assertTrue(backend.is_tensor(output))
        # Ensure the device of the data is on CPU.
        if backend.backend() == "tensorflow":
            self.assertIn("CPU", str(output.device))
        elif backend.backend() == "jax":
            self.assertIn("CPU", str(output.device))
        elif backend.backend() == "torch":
            self.assertEqual("cpu", str(output.device))

    def test_rescaling_with_channels_first_and_vector_scale(self):
        # Temporarily switch the global image data format; restored below.
        config = backend.image_data_format()
        backend.set_image_data_format("channels_first")
        layer = layers.Rescaling(
            scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
        )
        x = np.random.random((2, 3, 10, 10)) * 255
        layer(x)
        backend.set_image_data_format(config)

    @pytest.mark.requires_trainable_backend
    def test_numpy_args(self):
        # https://github.com/keras-team/keras/issues/20072
        self.run_layer_test(
            layers.Rescaling,
            init_kwargs={
                "scale": np.array(1.0 / 255.0),
                "offset": np.array(0.5),
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/index_lookup_test.py | keras/src/layers/preprocessing/index_lookup_test.py | import os
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.saving import saving_api
@pytest.mark.skipif(
backend.backend() == "numpy", reason="Failing for numpy backend."
)
class IndexLookupLayerTest(testing.TestCase):
def test_basics_string_vocab(self):
    """Lookup with a string vocabulary, provided every supported way.

    Covers `adapt()`, the `vocabulary` constructor argument (with and
    without special tokens), and `set_vocabulary()` (with and without
    special tokens).
    """
    # Case: adapt + list inputs
    adapt_data = ["one", "one", "one", "two", "two", "three"]
    input_data = ["one", "two", "four"]
    kwargs = {
        "max_tokens": 7,
        "num_oov_indices": 1,
        "mask_token": "",
        "oov_token": "[OOV]",
        "vocabulary_dtype": "string",
    }
    layer = layers.IndexLookup(**kwargs)
    layer.adapt(adapt_data)
    self.assertEqual(
        layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
    )
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["one", "two", "three"],
    )
    output = layer(input_data)
    # "four" is out-of-vocabulary and maps to the OOV index (1).
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: numpy array input
    output = layer(np.array(input_data))
    self.assertEqual(list(output), [2, 3, 1])
    # Case: fixed vocab + list inputs
    vocabulary = ["one", "two", "three"]
    layer = layers.IndexLookup(vocabulary=vocabulary, **kwargs)
    self.assertEqual(
        layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
    )
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["one", "two", "three"],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: fixed vocab with special tokens + list inputs
    vocabulary_with_special_tokens = ["", "[OOV]", "one", "two", "three"]
    layer = layers.IndexLookup(
        vocabulary=vocabulary_with_special_tokens, **kwargs
    )
    self.assertEqual(
        layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
    )
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["one", "two", "three"],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: set vocabulary
    layer = layers.IndexLookup(**kwargs)
    layer.set_vocabulary(vocabulary)
    self.assertEqual(
        layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
    )
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["one", "two", "three"],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: set vocabulary (with special tokens)
    layer = layers.IndexLookup(**kwargs)
    layer.set_vocabulary(vocabulary_with_special_tokens)
    self.assertEqual(
        layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
    )
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["one", "two", "three"],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
def test_basics_integer_vocab(self):
    """Same coverage as `test_basics_string_vocab`, with an int64 vocab."""
    # Case: adapt + list inputs
    adapt_data = [1, 1, 1, 2, 2, 3]
    input_data = [1, 2, 4]
    kwargs = {
        "max_tokens": 7,
        "num_oov_indices": 1,
        "mask_token": 0,
        "oov_token": -1,
        "vocabulary_dtype": "int64",
    }
    layer = layers.IndexLookup(**kwargs)
    layer.adapt(adapt_data)
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2, 3])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2, 3],
    )
    output = layer(input_data)
    # 4 is out-of-vocabulary and maps to the OOV index (1).
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: numpy array input
    output = layer(np.array(input_data))
    self.assertEqual(list(output), [2, 3, 1])
    # Case: fixed vocab + list inputs
    vocabulary = [1, 2, 3]
    layer = layers.IndexLookup(vocabulary=vocabulary, **kwargs)
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2, 3])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2, 3],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: fixed vocab with special tokens + list inputs
    vocabulary_with_special_tokens = [0, -1, 1, 2, 3]
    layer = layers.IndexLookup(
        vocabulary=vocabulary_with_special_tokens, **kwargs
    )
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2, 3])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2, 3],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: set vocabulary
    layer = layers.IndexLookup(**kwargs)
    layer.set_vocabulary(vocabulary)
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2, 3])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2, 3],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    # Case: set vocabulary (with special tokens)
    layer = layers.IndexLookup(**kwargs)
    layer.set_vocabulary(vocabulary_with_special_tokens)
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2, 3])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2, 3],
    )
    output = layer(input_data)
    self.assertEqual(list(output), [2, 3, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
def test_max_tokens_adapt(self):
    """With `max_tokens=4`, adapt keeps only the two most frequent tokens
    (the mask and OOV slots take the other two indices); everything else
    maps to OOV."""
    adapt_data = [1, 1, 1, 2, 2, 3]
    input_data = [1, 2, 3, 4]
    kwargs = {
        "max_tokens": 4,
        "num_oov_indices": 1,
        "mask_token": 0,
        "oov_token": -1,
        "vocabulary_dtype": "int64",
    }
    layer = layers.IndexLookup(**kwargs)
    layer.adapt(adapt_data)
    self.assertEqual(layer.get_vocabulary(), [0, -1, 1, 2])
    self.assertEqual(
        layer.get_vocabulary(include_special_tokens=False),
        [1, 2],
    )
    output = layer(input_data)
    # 3 was truncated from the vocab and 4 was never seen: both -> OOV.
    self.assertEqual(list(output), [2, 3, 1, 1])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
def test_pad_to_max_tokens(self):
    """`pad_to_max_tokens=True` pads multi-hot output up to `max_tokens`."""
    vocabulary = [1, 2]
    input_data = [1, 2]
    kwargs = {
        "max_tokens": 5,
        "num_oov_indices": 1,
        "mask_token": 0,
        "oov_token": -1,
        "vocabulary_dtype": "int64",
        "vocabulary": vocabulary,
        "pad_to_max_tokens": True,
        "output_mode": "multi_hot",
    }
    layer = layers.IndexLookup(**kwargs)
    output = layer(input_data)
    # 5-wide output even though only 4 slots (mask+OOV+2 tokens) are used.
    self.assertAllClose(output, [0, 1, 1, 0, 0])
    if backend.backend() != "torch":
        self.run_class_serialization_test(layer)
    def test_output_modes(self):
        """Checks every output mode of IndexLookup on the same vocabulary.

        Vocabulary index layout: 0 = mask (""), 1 = OOV ("[OOV]"),
        2 = "one", 3 = "two", 4 = "three". "four" is out-of-vocabulary.
        """
        vocabulary = ["one", "two", "three"]
        single_sample_input_data = ["one", "two", "four"]
        batch_input_data = [["one", "two", "four", "two"]]
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
            "vocabulary": vocabulary,
        }
        # int
        kwargs["output_mode"] = "int"
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertAllClose(output, [2, 3, 1])
        output = layer(batch_input_data)
        self.assertAllClose(output, [[2, 3, 1, 3]])
        # multi-hot
        kwargs["output_mode"] = "multi_hot"
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertAllClose(output, [1, 1, 1, 0])
        output = layer(batch_input_data)
        self.assertAllClose(output, [[1, 1, 1, 0]])
        # one-hot
        kwargs["output_mode"] = "one_hot"
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertAllClose(output, [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]])
        # count
        kwargs["output_mode"] = "count"
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertAllClose(output, [1, 1, 1, 0])
        output = layer(batch_input_data)
        # "two" appears twice in the batch sample, hence the count of 2.
        self.assertAllClose(output, [[1, 1, 2, 0]])
        # tf-idf
        kwargs["output_mode"] = "tf_idf"
        kwargs["idf_weights"] = np.array([0.1, 0.2, 0.3])
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertAllClose(output, [0.2, 0.1, 0.2, 0.0])
        output = layer(batch_input_data)
        self.assertAllClose(output, [[0.2, 0.1, 0.4, 0.0]])
    def test_sparse_outputs(self):
        # Placeholder: sparse-output coverage for IndexLookup has not been
        # implemented yet.
        # TODO
        pass
    def test_adapt_tf_idf(self):
        """adapt() in tf_idf mode learns IDF weights from document counts.

        The expected weights follow the smoothed IDF formula
        `log(1 + num_documents / (1 + document_count))`; for unbatched
        input each element counts as one document.
        """
        # Case: unbatched data
        adapt_data = ["one", "one", "one", "two", "two", "three"]
        input_data = ["one", "two", "four"]
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
            "output_mode": "tf_idf",
        }
        layer = layers.IndexLookup(**kwargs)
        layer.adapt(adapt_data)
        output = layer(input_data)
        # Document counts for one, two, three = [3, 2, 1]
        idf_weights = np.log(1 + len(adapt_data) / (1 + np.array([3, 2, 1])))
        self.assertAllClose(layer.idf_weights[1:], idf_weights)
        self.assertAllClose(output, [1.1337324, 0.91629076, 1.0986123, 0.0])
        # Case: batched data
        adapt_data = [["one", "one"], ["one", "two"], ["two", "three"]]
        input_data = [["one", "two"], ["two", "four"]]
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
            "output_mode": "tf_idf",
        }
        layer = layers.IndexLookup(**kwargs)
        layer.adapt(adapt_data)
        # Document counts for one, two, three = [2, 2, 1]
        idf_weights = np.log(1 + len(adapt_data) / (1 + np.array([2, 2, 1])))
        self.assertAllClose(layer.idf_weights[1:], idf_weights)
        output = layer(input_data)
        self.assertAllClose(
            output,
            [
                [0.0, 0.6931472, 0.6931472, 0.0],
                [0.76752836, 0.0, 0.6931472, 0.0],
            ],
        )
    def test_invert(self):
        """`invert=True` maps integer indices back to vocabulary tokens.

        Index 1 is the OOV slot, so it decodes to the configured oov_token.
        Outputs are TF string tensors, hence the utf-8 decode of `.numpy()`.
        """
        vocabulary = ["one", "two", "three"]
        single_sample_input_data = [2, 3, 1]
        batch_input_data = [[2, 3, 1, 3]]
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
            "vocabulary": vocabulary,
            "invert": True,
            "output_mode": "int",
        }
        layer = layers.IndexLookup(**kwargs)
        output = layer(single_sample_input_data)
        self.assertEqual(
            [w.decode("utf-8") for w in output.numpy()], ["one", "two", "[OOV]"]
        )
        output = layer(batch_input_data)
        self.assertEqual(
            [w.decode("utf-8") for w in output.numpy()[0]],
            ["one", "two", "[OOV]", "two"],
        )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="Requires string input dtype"
    )
    def test_saving(self):
        """Saving/loading a model preserves the lookup table.

        Covers both ways the vocabulary can be populated: learned via
        adapt() and passed explicitly at construction. In both cases the
        reloaded model must produce identical outputs.
        """
        # Test with adapt()
        vocabulary = ["one", "two", "three"]
        adapt_data = ["one", "one", "one", "two", "two", "three"]
        batch_input_data = np.array([["one", "two", "four"]])
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
            "output_mode": "int",
        }
        layer = layers.IndexLookup(**kwargs)
        layer.adapt(adapt_data)
        model = models.Sequential(
            [
                layers.Input(shape=(None,), dtype="string"),
                layer,
            ]
        )
        output_1 = model(batch_input_data)
        path = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(path)
        model = saving_api.load_model(path)
        output_2 = model(batch_input_data)
        self.assertAllClose(output_1, output_2)
        # Test when vocabulary is provided
        kwargs["vocabulary"] = vocabulary
        layer = layers.IndexLookup(**kwargs)
        model = models.Sequential(
            [
                layers.Input(shape=(None,), dtype="string"),
                layer,
            ]
        )
        output_1 = model(batch_input_data)
        path = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(path)
        model = saving_api.load_model(path)
        output_2 = model(batch_input_data)
        self.assertAllClose(output_1, output_2)
    def test_adapt_with_tf_data(self):
        """adapt() accepts a batched tf.data.Dataset as input."""
        # Case: adapt + list inputs
        adapt_data = tf_data.Dataset.from_tensor_slices(
            ["one", "one", "one", "two", "two", "three"]
        ).batch(2)
        input_data = ["one", "two", "four"]
        kwargs = {
            "max_tokens": 7,
            "num_oov_indices": 1,
            "mask_token": "",
            "oov_token": "[OOV]",
            "vocabulary_dtype": "string",
        }
        layer = layers.IndexLookup(**kwargs)
        layer.adapt(adapt_data)
        # Tokens are ordered by descending frequency after the specials.
        self.assertEqual(
            layer.get_vocabulary(), ["", "[OOV]", "one", "two", "three"]
        )
        self.assertEqual(
            layer.get_vocabulary(include_special_tokens=False),
            ["one", "two", "three"],
        )
        output = layer(input_data)
        self.assertEqual(list(output), [2, 3, 1])
        if backend.backend() != "torch":
            self.run_class_serialization_test(layer)
def test_max_tokens_less_than_two(self):
with self.assertRaisesRegex(
ValueError,
"If set, `max_tokens` must be greater than 1.",
):
layers.IndexLookup(
max_tokens=1,
num_oov_indices=1,
mask_token=None,
oov_token=None,
vocabulary_dtype="int64",
)
def test_max_tokens_none_with_pad_to_max_tokens(self):
with self.assertRaisesRegex(
ValueError,
"If pad_to_max_tokens is True, must set `max_tokens`.",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="int64",
pad_to_max_tokens=True,
)
def test_negative_num_oov_indices(self):
with self.assertRaisesRegex(
ValueError,
"`num_oov_indices` must be greater than or equal to 0.",
):
layers.IndexLookup(
max_tokens=10,
num_oov_indices=-1,
mask_token=None,
oov_token=None,
vocabulary_dtype="int64",
)
def test_invert_with_non_int_output_mode(self):
with self.assertRaisesRegex(
ValueError, r"`output_mode` must be `'int'` when `invert` is true."
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
invert=True,
output_mode="one_hot", # Invalid combination
)
def test_sparse_true_with_int_output_mode(self):
with self.assertRaisesRegex(
ValueError,
r"`sparse` may only be true if `output_mode` is `'one_hot'`",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
sparse=True,
output_mode="int", # Invalid combination
)
def test_idf_weights_set_with_non_tfidf_output_mode(self):
with self.assertRaisesRegex(
ValueError,
r"`idf_weights` should only be set if `output_mode` is `'tf_idf'`",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
idf_weights=[
0.5,
0.1,
0.3,
], # Should not be set for non-TF-IDF modes
output_mode="int",
)
def test_unrecognized_kwargs(self):
with self.assertRaisesRegex(
ValueError, "Unrecognized keyword argument"
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
output_mode="int",
# This is an unrecognized argument
extra_arg=True,
)
def test_non_tf_idf_with_idf_weights(self):
with self.assertRaisesRegex(
ValueError,
"`idf_weights` should only be set if `output_mode` is",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
output_mode="multi_hot",
idf_weights=[
0.5,
0.1,
0.3,
], # idf_weights not valid for multi_hot mode
)
def test_vocabulary_file_does_not_exist(self):
with self.assertRaisesRegex(
ValueError,
"Vocabulary file path/to/missing_vocab.txt does not exist",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
output_mode="int",
# Nonexistent file path
vocabulary="path/to/missing_vocab.txt",
)
def test_repeated_tokens_in_vocabulary(self):
with self.assertRaisesRegex(
ValueError, "The passed vocabulary has at least one repeated term."
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token=None,
oov_token=None,
vocabulary_dtype="string",
vocabulary=["token", "token", "unique"],
)
def test_mask_token_in_wrong_position(self):
with self.assertRaisesRegex(
ValueError,
"Found reserved mask token at unexpected location in `vocabulary`.",
):
layers.IndexLookup(
num_oov_indices=1,
max_tokens=None,
mask_token="mask",
oov_token=None,
vocabulary_dtype="string",
vocabulary=[
"token",
"mask",
"unique",
], # 'mask' should be at the start if included explicitly
)
def test_ensure_known_vocab_size_without_vocabulary(self):
kwargs = {
"num_oov_indices": 1,
# Assume empty string or some default token is valid.
"mask_token": "",
# Assume [OOV] or some default token is valid.
"oov_token": "[OOV]",
"output_mode": "multi_hot",
"pad_to_max_tokens": False,
"vocabulary_dtype": "string",
"max_tokens": None,
}
layer = layers.IndexLookup(**kwargs)
# Try calling the layer without setting the vocabulary.
with self.assertRaisesRegex(
RuntimeError, "When using `output_mode=multi_hot` and"
):
input_data = ["sample", "data"]
layer(input_data)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/hashed_crossing.py | keras/src/layers/preprocessing/hashed_crossing.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
from keras.src.utils import backend_utils
from keras.src.utils import numerical_utils
from keras.src.utils import tf_utils
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.HashedCrossing")
class HashedCrossing(Layer):
    """A preprocessing layer which crosses features using the "hashing trick".
    This layer performs crosses of categorical features using the "hashing
    trick". Conceptually, the transformation can be thought of as:
    `hash(concatenate(features)) % num_bins`.
    This layer currently only performs crosses of scalar inputs and batches of
    scalar inputs. Valid input shapes are `(batch_size, 1)`, `(batch_size,)` and
    `()`.
    **Note:** This layer wraps `tf.keras.layers.HashedCrossing`. It cannot
    be used as part of the compiled computation graph of a model with
    any backend other than TensorFlow.
    It can however be used with any backend when running eagerly.
    It can also always be used as part of an input preprocessing pipeline
    with any backend (outside the model itself), which is how we recommend
    to use this layer.
    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).
    Args:
        num_bins: Number of hash bins.
        output_mode: Specification for the output of the layer. Values can be
            `"int"`, or `"one_hot"` configuring the layer as follows:
            - `"int"`: Return the integer bin indices directly.
            - `"one_hot"`: Encodes each individual element in the input into an
                array the same size as `num_bins`, containing a 1 at the input's
                bin index. Defaults to `"int"`.
        sparse: Boolean. Only applicable to `"one_hot"` mode and only valid
            when using the TensorFlow backend. If `True`, returns
            a `SparseTensor` instead of a dense `Tensor`. Defaults to `False`.
        **kwargs: Keyword arguments to construct a layer.
    Examples:
    **Crossing two scalar features.**
    >>> layer = keras.layers.HashedCrossing(
    ...     num_bins=5)
    >>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
    >>> feat2 = np.array([101, 101, 101, 102, 102])
    >>> layer((feat1, feat2))
    array([1, 4, 1, 1, 3])
    **Crossing and one-hotting two scalar features.**
    >>> layer = keras.layers.HashedCrossing(
    ...     num_bins=5, output_mode='one_hot')
    >>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
    >>> feat2 = np.array([101, 101, 101, 102, 102])
    >>> layer((feat1, feat2))
    array([[0., 1., 0., 0., 0.],
            [0., 0., 0., 0., 1.],
            [0., 1., 0., 0., 0.],
            [0., 1., 0., 0., 0.],
            [0., 0., 0., 1., 0.]], dtype=float32)
    """
    def __init__(
        self,
        num_bins,
        output_mode="int",
        sparse=False,
        name=None,
        dtype=None,
        **kwargs,
    ):
        # This layer is a thin wrapper around TF ops, so TF must be present
        # regardless of the active backend.
        if not tf.available:
            raise ImportError(
                "Layer HashedCrossing requires TensorFlow. "
                "Install it via `pip install tensorflow`."
            )
        # Bin indices are integers; default the dtype accordingly.
        if output_mode == "int" and dtype is None:
            dtype = "int64"
        super().__init__(name=name, dtype=dtype)
        if sparse and backend.backend() != "tensorflow":
            raise ValueError(
                "`sparse=True` can only be used with the TensorFlow backend."
            )
        argument_validation.validate_string_arg(
            output_mode,
            allowable_strings=("int", "one_hot"),
            caller_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        self.num_bins = num_bins
        self.output_mode = output_mode
        self.sparse = sparse
        # Inputs are tuples of raw (possibly string) tensors; bypass the
        # standard tensor conversion and disable jit compilation of TF ops.
        self._allow_non_tensor_positional_args = True
        self._convert_input_args = False
        self.supports_jit = False
    def compute_output_shape(self, input_shape):
        """Compute the output shape from a pair of input shapes.

        Args:
            input_shape: tuple/list of exactly two shape tuples, one per
                crossed input.

        Returns:
            The output shape: same as the first input for `"int"` mode;
            with a trailing `num_bins` axis for `"one_hot"` mode.

        Raises:
            ValueError: if not given exactly two shape tuples, or if the
                two shapes are not identical.
        """
        if (
            len(input_shape) != 2
            or not isinstance(input_shape[0], tuple)
            or not isinstance(input_shape[1], tuple)
        ):
            raise ValueError(
                "Expected as input a list/tuple of 2 tensors. "
                f"Received input_shape={input_shape}"
            )
        # Handle scalar inputs (shape `()`) before the `[-1]` comparison
        # below, which would raise an IndexError on an empty tuple. The
        # previous `if not input_shape:` check was unreachable, because the
        # two-element check above already rejects an empty `input_shape`.
        if not input_shape[0] or not input_shape[1]:
            if input_shape[0] != input_shape[1]:
                raise ValueError(
                    "Expected the two input tensors to have identical shapes. "
                    f"Received input_shape={input_shape}"
                )
            if self.output_mode == "int":
                return ()
            return (self.num_bins,)
        if input_shape[0][-1] != input_shape[1][-1]:
            raise ValueError(
                "Expected the two input tensors to have identical shapes. "
                f"Received input_shape={input_shape}"
            )
        if self.output_mode == "int":
            return tuple(input_shape[0])
        if self.output_mode == "one_hot" and input_shape[0][-1] != 1:
            return tuple(input_shape[0]) + (self.num_bins,)
        return tuple(input_shape[0])[:-1] + (self.num_bins,)
    def call(self, inputs):
        """Cross the inputs via `tf.sparse.cross_hashed` and encode them."""
        from keras.src.backend import tensorflow as tf_backend
        self._check_at_least_two_inputs(inputs)
        inputs = [tf_utils.ensure_tensor(x) for x in inputs]
        self._check_input_shape_and_type(inputs)
        # Uprank to rank 2 for the cross_hashed op.
        first_shape = tuple(inputs[0].shape)
        rank = len(first_shape)
        if rank < 2:
            inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs]
        if rank < 1:
            inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs]
        # Perform the cross and convert to dense
        outputs = tf.sparse.cross_hashed(inputs, self.num_bins)
        outputs = tf.sparse.to_dense(outputs)
        # tf.sparse.cross_hashed output shape will always have None dimensions.
        # Re-apply the known static shape and downrank to match input rank.
        if rank == 2:
            outputs.set_shape(first_shape)
        elif rank == 1:
            outputs.set_shape(first_shape + (1,))
            outputs = tf.squeeze(outputs, axis=1)
        elif rank == 0:
            outputs = tf.reshape(outputs, [])
        # Encode outputs.
        outputs = numerical_utils.encode_categorical_inputs(
            outputs,
            output_mode=self.output_mode,
            depth=self.num_bins,
            sparse=self.sparse,
            dtype=self.compute_dtype,
            backend_module=tf_backend,
        )
        # Convert back to a tensor of the active backend.
        return backend_utils.convert_tf_tensor(outputs, dtype=self.dtype)
    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        return {
            "num_bins": self.num_bins,
            "output_mode": self.output_mode,
            "sparse": self.sparse,
            "name": self.name,
            "dtype": self.dtype,
        }
    def _check_at_least_two_inputs(self, inputs):
        # A cross needs a list/tuple of two or more features.
        if not isinstance(inputs, (list, tuple)):
            raise ValueError(
                "`HashedCrossing` should be called on a list or tuple of "
                f"inputs. Received: inputs={inputs}"
            )
        if len(inputs) < 2:
            raise ValueError(
                "`HashedCrossing` should be called on at least two inputs. "
                f"Received: inputs={inputs}"
            )
    def _check_input_shape_and_type(self, inputs):
        # Only scalars and (batches of) scalars are supported; all inputs
        # must be dense, equally shaped, and integer- or string-typed.
        first_shape = tuple(inputs[0].shape)
        rank = len(first_shape)
        if rank > 2 or (rank == 2 and first_shape[-1] != 1):
            raise ValueError(
                "All `HashedCrossing` inputs should have shape `()`, "
                "`(batch_size)` or `(batch_size, 1)`. "
                f"Received: inputs={inputs}"
            )
        if not all(tuple(x.shape) == first_shape for x in inputs[1:]):
            raise ValueError(
                "All `HashedCrossing` inputs should have equal shape. "
                f"Received: inputs={inputs}"
            )
        if any(
            isinstance(x, (tf.RaggedTensor, tf.SparseTensor)) for x in inputs
        ):
            raise ValueError(
                "All `HashedCrossing` inputs should be dense tensors. "
                f"Received: inputs={inputs}"
            )
        if not all(
            tf.as_dtype(x.dtype).is_integer or x.dtype == tf.string
            for x in inputs
        ):
            raise ValueError(
                "All `HashedCrossing` inputs should have an integer or "
                f"string dtype. Received: inputs={inputs}"
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/discretization.py | keras/src/layers/preprocessing/discretization.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.utils import argument_validation
from keras.src.utils import numerical_utils
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.Discretization")
class Discretization(DataLayer):
    """A preprocessing layer which buckets continuous features by ranges.
    This layer will place each element of its input data into one of several
    contiguous ranges and output an integer index indicating which range each
    element was placed in.
    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).
    Input shape:
        Any array of dimension 2 or higher.
    Output shape:
        Same as input shape.
    Arguments:
        bin_boundaries: A list of bin boundaries.
            The leftmost and rightmost bins
            will always extend to `-inf` and `inf`,
            so `bin_boundaries=[0., 1., 2.]`
            generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`,
            and `[2., +inf)`.
            If this option is set, `adapt()` should not be called.
        num_bins: The integer number of bins to compute.
            If this option is set, `bin_boundaries` should not be set and
            `adapt()` should be called to learn the bin boundaries.
        epsilon: Error tolerance, typically a small fraction
            close to zero (e.g. 0.01). Higher values of epsilon increase
            the quantile approximation, and hence result in more
            unequal buckets, but could improve performance
            and resource consumption.
        output_mode: Specification for the output of the layer.
            Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or
            `"count"` configuring the layer as follows:
            - `"int"`: Return the discretized bin indices directly.
            - `"one_hot"`: Encodes each individual element in the
                input into an array the same size as `num_bins`,
                containing a 1 at the input's bin
                index. If the last dimension is size 1, will encode on that
                dimension. If the last dimension is not size 1,
                will append a new dimension for the encoded output.
            - `"multi_hot"`: Encodes each sample in the input into a
                single array the same size as `num_bins`,
                containing a 1 for each bin index
                index present in the sample.
                Treats the last dimension as the sample
                dimension, if input shape is `(..., sample_length)`,
                output shape will be `(..., num_tokens)`.
            - `"count"`: As `"multi_hot"`, but the int array contains
                a count of the number of times the bin index appeared
                in the sample.
            Defaults to `"int"`.
        sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
            and `"count"` output modes. Only supported with TensorFlow
            backend. If `True`, returns a `SparseTensor` instead of
            a dense `Tensor`. Defaults to `False`.
    Examples:
    Discretize float values based on provided buckets.
    >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
    >>> layer = Discretization(bin_boundaries=[0., 1., 2.])
    >>> layer(input)
    array([[0, 2, 3, 1],
           [1, 3, 2, 1]])
    Discretize float values based on a number of buckets to compute.
    >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
    >>> layer = Discretization(num_bins=4, epsilon=0.01)
    >>> layer.adapt(input)
    >>> layer(input)
    array([[0, 2, 3, 2],
           [1, 3, 3, 1]])
    """
    def __init__(
        self,
        bin_boundaries=None,
        num_bins=None,
        epsilon=0.01,
        output_mode="int",
        sparse=False,
        dtype=None,
        name=None,
    ):
        super().__init__(name=name, dtype=dtype)
        # Sparse outputs require backend support (TensorFlow only today).
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            raise ValueError(
                f"`sparse=True` cannot be used with backend {backend.backend()}"
            )
        # Integer bin indices have no sparse representation here.
        if sparse and output_mode == "int":
            raise ValueError(
                "`sparse=True` may only be used if `output_mode` is "
                "`'one_hot'`, `'multi_hot'`, or `'count'`. "
                f"Received: sparse={sparse} and "
                f"output_mode={output_mode}"
            )
        argument_validation.validate_string_arg(
            output_mode,
            allowable_strings=(
                "int",
                "one_hot",
                "multi_hot",
                "count",
            ),
            caller_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        if num_bins is not None and num_bins < 0:
            raise ValueError(
                "`num_bins` must be greater than or equal to 0. "
                f"Received: `num_bins={num_bins}`"
            )
        # `bin_boundaries` and `num_bins` are mutually exclusive: the former
        # fixes the bins, the latter asks adapt() to learn them.
        if num_bins is not None and bin_boundaries is not None:
            raise ValueError(
                "Both `num_bins` and `bin_boundaries` should not be set. "
                f"Received: `num_bins={num_bins}` and "
                f"`bin_boundaries={bin_boundaries}`"
            )
        if num_bins is None and bin_boundaries is None:
            raise ValueError(
                "You need to set either `num_bins` or `bin_boundaries`."
            )
        self.bin_boundaries = bin_boundaries
        self.num_bins = num_bins
        self.epsilon = epsilon
        self.output_mode = output_mode
        self.sparse = sparse
        # `summary` is the running quantile sketch used by adapt(); it only
        # exists in `num_bins` mode (row 0: values, row 1: weights).
        if self.bin_boundaries:
            self.summary = None
        else:
            self.summary = np.array([[], []], dtype="float32")
    @property
    def input_dtype(self):
        # Inputs are always treated as floats for bucketing.
        return backend.floatx()
    @property
    def output_dtype(self):
        # "int" mode emits bin indices; encoded modes use the compute dtype.
        return self.compute_dtype if self.output_mode != "int" else "int32"
    def adapt(self, data, steps=None):
        """Computes bin boundaries from quantiles in a input dataset.
        Calling `adapt()` on a `Discretization` layer is an alternative to
        passing in a `bin_boundaries` argument during construction. A
        `Discretization` layer should always be either adapted over a dataset or
        passed `bin_boundaries`.
        During `adapt()`, the layer will estimate the quantile boundaries of the
        input dataset. The number of quantiles can be controlled via the
        `num_bins` argument, and the error tolerance for quantile boundaries can
        be controlled via the `epsilon` argument.
        Arguments:
            data: The data to train on. It can be passed either as a
                batched `tf.data.Dataset`,
                or as a NumPy array.
            steps: Integer or `None`.
                Total number of steps (batches of samples) to process.
                If `data` is a `tf.data.Dataset`, and `steps` is `None`,
                `adapt()` will run until the input dataset is exhausted.
                When passing an infinitely
                repeating dataset, you must specify the `steps` argument. This
                argument is not supported with array inputs or list inputs.
        """
        if self.num_bins is None:
            raise ValueError(
                "Cannot adapt a Discretization layer that has been initialized "
                "with `bin_boundaries`, use `num_bins` instead."
            )
        self.reset_state()
        if isinstance(data, tf.data.Dataset):
            if steps is not None:
                data = data.take(steps)
            for batch in data:
                self.update_state(batch)
        else:
            self.update_state(data)
        self.finalize_state()
    def update_state(self, data):
        """Fold one batch of data into the running quantile summary."""
        data = np.array(data).astype("float32")
        summary = summarize(data, self.epsilon)
        self.summary = merge_summaries(summary, self.summary, self.epsilon)
    def finalize_state(self):
        """Convert the accumulated summary into `bin_boundaries`."""
        # No-op in fixed-boundary mode.
        if self.num_bins is None:
            return
        self.bin_boundaries = get_bin_boundaries(
            self.summary, self.num_bins
        ).tolist()
    def reset_state(self):
        """Clear the adapt() accumulator (only meaningful in num_bins mode)."""
        if self.num_bins is None:
            return
        self.summary = np.array([[], []], dtype="float32")
    def compute_output_spec(self, inputs):
        # Discretization is elementwise: shape is preserved, dtype follows
        # the output mode.
        return backend.KerasTensor(shape=inputs.shape, dtype=self.output_dtype)
    def load_own_variables(self, store):
        """Restore adapt() state saved by older Keras versions."""
        if len(store) == 1:
            # Legacy format case
            self.summary = store["0"]
            return
        # NOTE(review): any other store layout is silently ignored here —
        # presumably the layer has no variables to load in the current
        # format; confirm against the saving path.
    def call(self, inputs):
        """Bucketize `inputs` and encode per `output_mode`."""
        if self.bin_boundaries is None:
            raise ValueError(
                "You need to either pass the `bin_boundaries` argument at "
                "construction time or call `adapt(dataset)` before you can "
                "start using the `Discretization` layer."
            )
        # digitize maps each element to the index of its containing bin.
        indices = self.backend.numpy.digitize(inputs, self.bin_boundaries)
        return numerical_utils.encode_categorical_inputs(
            indices,
            output_mode=self.output_mode,
            # n boundaries produce n + 1 bins (open-ended at both extremes).
            depth=len(self.bin_boundaries) + 1,
            dtype=self.output_dtype,
            sparse=self.sparse,
            backend_module=self.backend,
        )
    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        return {
            "bin_boundaries": self.bin_boundaries,
            "num_bins": self.num_bins,
            "epsilon": self.epsilon,
            "output_mode": self.output_mode,
            "sparse": self.sparse,
            "name": self.name,
            "dtype": self.dtype,
        }
    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Re-create a layer, tolerating configs saved after `adapt()`."""
        if (
            config.get("bin_boundaries", None) is not None
            and config.get("num_bins", None) is not None
        ):
            # After `adapt` was called, both `bin_boundaries` and `num_bins` are
            # populated, but `__init__` won't let us create a new layer with
            # both `bin_boundaries` and `num_bins`. We therefore apply
            # `bin_boundaries` after creation.
            config = config.copy()
            bin_boundaries = config.pop("bin_boundaries")
            discretization = cls(**config)
            discretization.bin_boundaries = bin_boundaries
            return discretization
        return cls(**config)
def summarize(values, epsilon):
    """Reduce a sequence of values to a quantile summary.

    Based on numpy.quantiles, but adapted so that partial summaries of
    multiple data sets can later be merged. The target number of bins is the
    reciprocal of `epsilon`; representative values are sampled from the
    sorted input at the corresponding stride, and each carries a weight equal
    to the number of elements it stands for. If the target exceeds the input
    size, every value is kept with weight 1.

    Args:
        values: array-like, flattened and sorted internally.
        epsilon: A `'float32'` giving the approximate desired precision.

    Returns:
        A 2D `np.ndarray`: row 0 holds the sampled partition values, row 1
        the corresponding weights (counts).
    """
    flat = np.sort(np.reshape(values, [-1]))
    count = np.size(flat)
    target_bins = 1.0 / epsilon
    spacing = count / target_bins
    stride = max(spacing, 1)
    # Sample every `stride`-th sorted value, starting one stride in.
    bounds = flat[int(spacing) :: int(stride)]
    weights = np.ones_like(bounds) * stride
    return np.stack([bounds, weights])
def merge_summaries(prev_summary, next_summary, epsilon):
    """Merge two quantile summaries within `epsilon` error tolerance.

    The summaries are concatenated column-wise, re-sorted by their value row,
    and compressed back down so the combined summary stays within the
    requested precision.

    Args:
        prev_summary: 2D `np.ndarray` summary to merge.
        next_summary: 2D `np.ndarray` summary to merge.
        epsilon: A float that determines the approximate desired precision.

    Returns:
        A 2D `np.ndarray`: row 0 holds the merged partition values, row 1
        the corresponding weights (counts).
    """
    combined = np.concatenate((prev_summary, next_summary), axis=1)
    order = np.argsort(combined[0])
    return compress_summary(combined[:, order], epsilon)
def get_bin_boundaries(summary, num_bins):
    """Extract `num_bins - 1` interior bin boundaries from a summary."""
    compressed = compress_summary(summary, 1.0 / num_bins)
    # Drop the last partition value: the final bin is open-ended.
    return compressed[0][:-1]
def compress_summary(summary, epsilon):
    """Compress a quantile summary to within `epsilon` accuracy.

    Keeps summaries small after merges and produces the final boundaries.
    New partition values are found by interpolating cumulative weight
    percentages of the input summary; each new weight is the difference of
    consecutive interpolated cumulative weights.

    Args:
        summary: 2D `np.ndarray` summary to be compressed.
        epsilon: A `'float32'` that determines the approximate desired
            precision.

    Returns:
        A 2D `np.ndarray`: row 0 holds the interpolated partition values,
        row 1 the corresponding weights (counts).
    """
    num_entries = summary.shape[1]
    if num_entries * epsilon < 1:
        # Already small enough; return unchanged.
        return summary
    target_percents = epsilon + np.arange(0.0, 1.0, epsilon)
    weight_totals = summary[1].cumsum()
    weight_fractions = weight_totals / weight_totals[-1]
    interp_bins = np.interp(target_percents, weight_fractions, summary[0])
    interp_totals = np.interp(target_percents, weight_fractions, weight_totals)
    shifted_totals = np.concatenate((np.array([0]), interp_totals[:-1]))
    compressed = np.stack((interp_bins, interp_totals - shifted_totals))
    return compressed.astype("float32")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/hashed_crossing_test.py | keras/src/layers/preprocessing/hashed_crossing_test.py | import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.testing.test_utils import named_product
class HashedCrossingTest(testing.TestCase):
    def test_basics(self):
        """Standard layer contract checks for both output modes.

        "int" mode keeps the input batch shape; "one_hot" mode appends a
        `num_bins` axis. The layer holds no weights or seed generators.
        """
        self.run_layer_test(
            layers.HashedCrossing,
            init_kwargs={
                "num_bins": 3,
                "output_mode": "int",
            },
            input_data=(np.array([1, 2]), np.array([4, 5])),
            expected_output_shape=(2,),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
            # Incomplete op support on tensorflow.
            run_mixed_precision_check=False,
        )
        self.run_layer_test(
            layers.HashedCrossing,
            init_kwargs={"num_bins": 4, "output_mode": "one_hot"},
            input_data=(np.array([1, 2]), np.array([4, 5])),
            expected_output_shape=(2, 4),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
            # Incomplete op support on tensorflow.
            run_mixed_precision_check=False,
        )
    @parameterized.named_parameters(
        named_product(
            sparse=(
                [True, False] if backend.backend() == "tensorflow" else [False]
            )
        )
    )
    def test_correctness(self, sparse):
        """Crossing produces the expected bin ids and one-hot rows.

        The expected values pin the deterministic hash of each
        (string, int) pair modulo `num_bins`. The sparse variant only runs
        on the TensorFlow backend.
        """
        layer = layers.HashedCrossing(num_bins=5)
        feat1 = np.array(["A", "B", "A", "B", "A"])
        feat2 = np.array([101, 101, 101, 102, 102])
        output = layer((feat1, feat2))
        self.assertAllClose(tf.constant([1, 4, 1, 1, 3]), output)
        layer = layers.HashedCrossing(
            num_bins=5, output_mode="one_hot", sparse=sparse
        )
        feat1 = np.array(["A", "B", "A", "B", "A"])
        feat2 = np.array([101, 101, 101, 102, 102])
        output = layer((feat1, feat2))
        self.assertSparse(output, sparse)
        self.assertAllClose(
            np.array(
                [
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 1.0],
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0, 0.0],
                ]
            ),
            output,
        )
def test_tf_data_compatibility(self):
layer = layers.HashedCrossing(num_bins=5)
feat1 = np.array(["A", "B", "A", "B", "A"])
feat2 = np.array([101, 101, 101, 102, 102])
ds = (
tf.data.Dataset.from_tensor_slices((feat1, feat2))
.batch(5)
.map(lambda x1, x2: layer((x1, x2)))
)
output = next(iter(ds)).numpy()
self.assertAllClose(np.array([1, 4, 1, 1, 3]), output)
    def test_static_shape_preserved(self):
        """Inside tf.data, the static batch shape survives the cross.

        `tf.sparse.cross_hashed` loses static dims, so the layer re-applies
        them; the assertion runs during graph tracing of `map`.
        """
        layer = layers.HashedCrossing(num_bins=5)
        def call_layer(x1, x2):
            result = layer((x1, x2))
            self.assertEqual(result.shape, (5,))
            return result
        feat1 = np.array(["A", "B", "A", "B", "A"])
        feat2 = np.array([101, 101, 101, 102, 102])
        ds = (
            tf.data.Dataset.from_tensor_slices((feat1, feat2))
            .batch(5, drop_remainder=True)
            .map(call_layer)
        )
        next(iter(ds))
def test_unsupported_shape_input_fails(self):
with self.assertRaisesRegex(ValueError, "inputs should have shape"):
layers.HashedCrossing(num_bins=10)(
(np.array([[[1.0]]]), np.array([[[1.0]]]))
)
    @pytest.mark.xfail
    def test_cross_output_dtype(self):
        """Output dtype follows mode defaults and explicit overrides.

        Expected: "int" mode defaults to int64, "one_hot" to float32, and
        an explicit `dtype` wins. Marked xfail: currently not satisfied on
        at least one backend.
        """
        input_1, input_2 = np.array([1]), np.array([1])
        layer = layers.HashedCrossing(num_bins=2)
        output_dtype = backend.standardize_dtype(
            layer((input_1, input_2)).dtype
        )
        self.assertEqual(output_dtype, "int64")
        layer = layers.HashedCrossing(num_bins=2, dtype="int32")
        output_dtype = backend.standardize_dtype(
            layer((input_1, input_2)).dtype
        )
        self.assertEqual(output_dtype, "int32")
        layer = layers.HashedCrossing(num_bins=2, output_mode="one_hot")
        output_dtype = backend.standardize_dtype(
            layer((input_1, input_2)).dtype
        )
        self.assertEqual(output_dtype, "float32")
        layer = layers.HashedCrossing(
            num_bins=2, output_mode="one_hot", dtype="float64"
        )
        output_dtype = backend.standardize_dtype(
            layer((input_1, input_2)).dtype
        )
        self.assertEqual(output_dtype, "float64")
def test_non_list_input_fails(self):
with self.assertRaisesRegex(ValueError, "should be called on a list"):
layers.HashedCrossing(num_bins=10)(np.array(1))
def test_single_input_fails(self):
with self.assertRaisesRegex(ValueError, "at least two inputs"):
layers.HashedCrossing(num_bins=10)([np.array(1)])
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Need sparse tensor support.",
)
def test_sparse_input_fails(self):
with self.assertRaisesRegex(
ValueError, "inputs should be dense tensors"
):
sparse_in = tf.sparse.from_dense(np.array([1]))
layers.HashedCrossing(num_bins=10)((sparse_in, sparse_in))
def test_float_input_fails(self):
with self.assertRaisesRegex(
ValueError, "should have an integer or string"
):
layers.HashedCrossing(num_bins=10)(
(np.array([1.0]), np.array([1.0]))
)
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Need string tensor support.",
    )
    def test_tf_string(self):
        """Exercises the layer end to end with native `tf.string` tensors."""
        # Scalar string x scalar int cross, default "int" output mode.
        layer = layers.HashedCrossing(num_bins=10)
        feat1 = tf.constant("A")
        feat2 = tf.constant(101)
        outputs = layer((feat1, feat2))
        self.assertAllClose(outputs, 1)
        # Batched cross with "one_hot" output mode.
        layer = layers.HashedCrossing(num_bins=5, output_mode="one_hot")
        feat1 = tf.constant(["A", "B", "A", "B", "A"])
        feat2 = tf.constant([101, 101, 101, 102, 102])
        self.assertAllClose(
            tf.constant(
                [
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 1.0],
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0, 0.0],
                ]
            ),
            layer((feat1, feat2)),
        )
        # Batched cross with default "int" output mode.
        layer = layers.HashedCrossing(num_bins=5)
        feat1 = tf.constant(["A", "B", "A", "B", "A"])
        feat2 = tf.constant([101, 101, 101, 102, 102])
        self.assertAllClose(tf.constant([1, 4, 1, 1, 3]), layer((feat1, feat2)))
        # Sparse one-hot output: a config round-trip must produce the same
        # sparse outputs as the original layer.
        layer = layers.HashedCrossing(
            num_bins=5, output_mode="one_hot", sparse=True
        )
        cloned_layer = layers.HashedCrossing.from_config(layer.get_config())
        feat1 = tf.constant([["A"], ["B"], ["A"], ["B"], ["A"]])
        feat2 = tf.constant([[101], [101], [101], [102], [102]])
        original_outputs = layer((feat1, feat2))
        cloned_outputs = cloned_layer((feat1, feat2))
        self.assertAllClose(
            tf.sparse.to_dense(cloned_outputs),
            tf.sparse.to_dense(original_outputs),
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/string_lookup.py | keras/src/layers/preprocessing/string_lookup.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.index_lookup import IndexLookup
from keras.src.utils import backend_utils
from keras.src.utils.module_utils import tensorflow as tf
if backend.backend() == "torch":
import torch
@keras_export("keras.layers.StringLookup")
class StringLookup(IndexLookup):
    """A preprocessing layer that maps strings to (possibly encoded) indices.

    This layer translates a set of arbitrary strings into integer output via a
    table-based vocabulary lookup. This layer will perform no splitting or
    transformation of input strings. For a layer that can split and tokenize
    natural language, see the `keras.layers.TextVectorization` layer.

    The vocabulary for the layer must be either supplied on construction or
    learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
    determine the frequency of individual strings tokens, and create a
    vocabulary from them. If the vocabulary is capped in size, the most frequent
    tokens will be used to create the vocabulary and all others will be treated
    as out-of-vocabulary (OOV).

    There are two possible output modes for the layer. When `output_mode` is
    `"int"`, input strings are converted to their index in the vocabulary (an
    integer).
    When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input strings
    are encoded into an array where each dimension corresponds to an element in
    the vocabulary.

    The vocabulary can optionally contain a mask token as well as an OOV token
    (which can optionally occupy multiple indices in the vocabulary, as set
    by `num_oov_indices`).
    The position of these tokens in the vocabulary is fixed. When `output_mode`
    is `"int"`, the vocabulary will begin with the mask token (if set), followed
    by OOV indices, followed by the rest of the vocabulary. When `output_mode`
    is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with
    OOV indices and instances of the mask token will be dropped.

    **Note:** This layer uses TensorFlow internally. It cannot
    be used as part of the compiled computation graph of a model with
    any backend other than TensorFlow.
    It can however be used with any backend when running eagerly.
    It can also always be used as part of an input preprocessing pipeline
    with any backend (outside the model itself), which is how we recommend
    using this layer.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Args:
        max_tokens: Maximum size of the vocabulary for this layer. This should
            only be specified when adapting the vocabulary or when setting
            `pad_to_max_tokens=True`. If None, there is no cap on the size of
            the vocabulary. Note that this size includes the OOV
            and mask tokens. Defaults to `None`.
        num_oov_indices: The number of out-of-vocabulary tokens to use.
            If this value is more than 1, OOV inputs are hashed to
            determine their OOV value.
            If this value is 0, OOV inputs will cause an error when calling
            the layer. Defaults to `1`.
        mask_token: A token that represents masked inputs. When `output_mode` is
            `"int"`, the token is included in the vocabulary and mapped to index
            0.
            In other output modes, the token will not appear in the vocabulary
            and instances of the mask token in the input will be dropped.
            If set to `None`, no mask term will be added. Defaults to `None`.
        oov_token: Only used when `invert` is True. The token to return for OOV
            indices. Defaults to `"[UNK]"`.
        vocabulary: Optional. Either an array of strings or a string path to a
            text file. If passing an array, you can pass a tuple, list, 1D NumPy
            array, or 1D tensor containing the string vocabulary terms.
            If passing a file path, the file should contain one line per term in
            the vocabulary. If this argument is set, there is no need to
            `adapt()` the layer.
        idf_weights: Only valid when `output_mode` is `"tf_idf"`.
            A tuple, list, 1D NumPy array, or 1D tensor or the same length
            as the vocabulary, containing the floating point inverse document
            frequency weights, which will be multiplied by per sample term
            counts for the final TF-IDF weight.
            If the `vocabulary` argument is set and `output_mode` is `"tf_idf"`,
            this argument must be supplied.
        invert: Only valid when `output_mode` is `"int"`.
            If `True`, this layer will map indices to vocabulary items
            instead of mapping vocabulary items to indices.
            Defaults to `False`.
        output_mode: Specification for the output of the layer. Values can be
            `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
            configuring the layer as follows:
            - `"int"`: Return the vocabulary indices of the input tokens.
            - `"one_hot"`: Encodes each individual element in the input into an
                array the same size as the vocabulary,
                containing a 1 at the element index. If the last dimension
                is size 1, will encode on that dimension.
                If the last dimension is not size 1, will append a new
                dimension for the encoded output.
            - `"multi_hot"`: Encodes each sample in the input into a single
                array the same size as the vocabulary containing a 1 for each
                vocabulary term present in the sample.
                Treats the last dimension as the sample dimension, if the input
                shape is `(..., sample_length)`, the output shape will be
                `(..., num_tokens)`.
            - `"count"`: As `"multi_hot"`, but the int array contains
                a count of the number of times the token at that index
                appeared in the sample.
            - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is
                applied to find the value in each token slot.
            For `"int"` output, any shape of input and output is supported.
            For all other output modes, currently only output up to rank 2
            is supported. Defaults to `"int"`.
        pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
            `"count"`, or `"tf_idf"`. If `True`, the output will have
            its feature axis padded to `max_tokens` even if the number
            of unique tokens in the vocabulary is less than `max_tokens`,
            resulting in a tensor of shape `(batch_size, max_tokens)`
            regardless of vocabulary size. Defaults to `False`.
        sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
            `"tf_idf"` output modes. Only supported with TensorFlow
            backend. If `True`, returns a `SparseTensor`
            instead of a dense `Tensor`. Defaults to `False`.
        encoding: Optional. The text encoding to use to interpret the input
            strings. Defaults to `"utf-8"`.

    Examples:

    **Creating a lookup layer with a known vocabulary**

    This example creates a lookup layer with a pre-existing vocabulary.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [["a", "c", "d"], ["d", "z", "b"]]
    >>> layer = StringLookup(vocabulary=vocab)
    >>> layer(data)
    array([[1, 3, 4],
           [4, 0, 2]])

    **Creating a lookup layer with an adapted vocabulary**

    This example creates a lookup layer and generates the vocabulary by
    analyzing the dataset.

    >>> data = [["a", "c", "d"], ["d", "z", "b"]]
    >>> layer = StringLookup()
    >>> layer.adapt(data)
    >>> layer.get_vocabulary()
    ['[UNK]', 'd', 'z', 'c', 'b', 'a']

    Note that the OOV token `"[UNK]"` has been added to the vocabulary.
    The remaining tokens are sorted by frequency
    (`"d"`, which has 2 occurrences, is first) then by inverse sort order.

    >>> data = [["a", "c", "d"], ["d", "z", "b"]]
    >>> layer = StringLookup()
    >>> layer.adapt(data)
    >>> layer(data)
    array([[5, 3, 1],
           [1, 2, 4]])

    **Lookups with multiple OOV indices**

    This example demonstrates how to use a lookup layer with multiple OOV
    indices. When a layer is created with more than one OOV index, any OOV
    values are hashed into the number of OOV buckets, distributing OOV values in
    a deterministic fashion across the set.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [["a", "c", "d"], ["m", "z", "b"]]
    >>> layer = StringLookup(vocabulary=vocab, num_oov_indices=2)
    >>> layer(data)
    array([[2, 4, 5],
           [0, 1, 3]])

    Note that the output for OOV value 'm' is 0, while the output for OOV value
    `"z"` is 1. The in-vocab terms have their output index increased by 1 from
    earlier examples (a maps to 2, etc) in order to make space for the extra OOV
    value.

    **One-hot output**

    Configure the layer with `output_mode='one_hot'`. Note that the first
    `num_oov_indices` dimensions in the one_hot encoding represent OOV values.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = ["a", "b", "c", "d", "z"]
    >>> layer = StringLookup(vocabulary=vocab, output_mode='one_hot')
    >>> layer(data)
    array([[0., 1., 0., 0., 0.],
           [0., 0., 1., 0., 0.],
           [0., 0., 0., 1., 0.],
           [0., 0., 0., 0., 1.],
           [1., 0., 0., 0., 0.]], dtype=float32)

    **Multi-hot output**

    Configure the layer with `output_mode='multi_hot'`. Note that the first
    `num_oov_indices` dimensions in the multi_hot encoding represent OOV values.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
    >>> layer = StringLookup(vocabulary=vocab, output_mode='multi_hot')
    >>> layer(data)
    array([[0., 1., 0., 1., 1.],
           [1., 0., 1., 0., 1.]], dtype=float32)

    **Token count output**

    Configure the layer with `output_mode='count'`. As with multi_hot output,
    the first `num_oov_indices` dimensions in the output represent OOV values.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
    >>> layer = StringLookup(vocabulary=vocab, output_mode='count')
    >>> layer(data)
    array([[0., 1., 0., 1., 2.],
           [2., 0., 1., 0., 1.]], dtype=float32)

    **TF-IDF output**

    Configure the layer with `output_mode="tf_idf"`. As with multi_hot output,
    the first `num_oov_indices` dimensions in the output represent OOV values.

    Each token bin will output `token_count * idf_weight`, where the idf weights
    are the inverse document frequency weights per token. These should be
    provided along with the vocabulary. Note that the `idf_weight` for OOV
    values will default to the average of all idf weights passed in.

    >>> vocab = ["a", "b", "c", "d"]
    >>> idf_weights = [0.25, 0.75, 0.6, 0.4]
    >>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
    >>> layer = StringLookup(output_mode="tf_idf")
    >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
    >>> layer(data)
    array([[0.  , 0.25, 0.  , 0.6 , 0.8 ],
           [1.0 , 0.  , 0.75, 0.  , 0.4 ]], dtype=float32)

    To specify the idf weights for OOV values, you will need to pass the entire
    vocabulary including the leading OOV token.

    >>> vocab = ["[UNK]", "a", "b", "c", "d"]
    >>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
    >>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
    >>> layer = StringLookup(output_mode="tf_idf")
    >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
    >>> layer(data)
    array([[0.  , 0.25, 0.  , 0.6 , 0.8 ],
           [1.8 , 0.  , 0.75, 0.  , 0.4 ]], dtype=float32)

    When adapting the layer in `"tf_idf"` mode, each input sample will be
    considered a document, and IDF weight per token will be calculated as
    `log(1 + num_documents / (1 + token_document_count))`.

    **Inverse lookup**

    This example demonstrates how to map indices to strings using this layer.
    (You can also use `adapt()` with `invert=True`, but for simplicity we'll
    pass the vocab in this example.)

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [[1, 3, 4], [4, 0, 2]]
    >>> layer = StringLookup(vocabulary=vocab, invert=True)
    >>> layer(data)
    array([[b'a', b'c', b'd'],
           [b'd', b'[UNK]', b'b']], dtype=object)

    Note that the first index corresponds to the OOV token by default.

    **Forward and inverse lookup pairs**

    This example demonstrates how to use the vocabulary of a standard lookup
    layer to create an inverse lookup layer.

    >>> vocab = ["a", "b", "c", "d"]
    >>> data = [["a", "c", "d"], ["d", "z", "b"]]
    >>> layer = StringLookup(vocabulary=vocab)
    >>> i_layer = StringLookup(vocabulary=vocab, invert=True)
    >>> int_data = layer(data)
    >>> i_layer(int_data)
    array([[b'a', b'c', b'd'],
           [b'd', b'[UNK]', b'b']], dtype=object)

    In this example, the input value `"z"` resulted in an output of `"[UNK]"`,
    since `"z"` was not in the vocabulary - it got represented as an OOV, and
    all OOV values are returned as `"[UNK]"` in the inverse layer. Also, note
    that for the inverse to work, you must have already set the forward layer
    vocabulary either directly or via `adapt()` before calling
    `get_vocabulary()`.
    """

    def __init__(
        self,
        max_tokens=None,
        num_oov_indices=1,
        mask_token=None,
        oov_token="[UNK]",
        vocabulary=None,
        idf_weights=None,
        invert=False,
        output_mode="int",
        pad_to_max_tokens=False,
        sparse=False,
        encoding="utf-8",
        name=None,
        **kwargs,
    ):
        # The lookup table itself is implemented with TensorFlow ops, so TF
        # must be importable even when another backend is active.
        if not tf.available:
            raise ImportError(
                "Layer StringLookup requires TensorFlow. "
                "Install it via `pip install tensorflow`."
            )
        # Sparse output tensors only exist on the TensorFlow backend.
        if sparse and backend.backend() != "tensorflow":
            raise ValueError(
                "`sparse=True` can only be used with the TensorFlow backend."
            )
        self.encoding = encoding
        super().__init__(
            max_tokens=max_tokens,
            num_oov_indices=num_oov_indices,
            mask_token=mask_token,
            oov_token=oov_token,
            vocabulary=vocabulary,
            idf_weights=idf_weights,
            invert=invert,
            output_mode=output_mode,
            pad_to_max_tokens=pad_to_max_tokens,
            sparse=sparse,
            name=name,
            vocabulary_dtype="string",
            **kwargs,
        )
        # Raw Python lists / NumPy arrays of strings must reach `call()`
        # unconverted; the backend tensor conversion is skipped and handled
        # manually in `call()` below.
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True
        # NOTE(review): presumably disabled because the TF lookup-table ops
        # used here cannot be JIT-compiled - confirm.
        self.supports_jit = False

    def adapt(self, data, steps=None):
        """Computes a vocabulary of terms from tokens in a dataset.

        Calling `adapt()` on a `StringLookup` layer is an alternative to passing
        in a precomputed vocabulary on construction via the `vocabulary`
        argument. A `StringLookup` layer should always be either adapted over a
        dataset or supplied with a vocabulary.

        During `adapt()`, the layer will build a vocabulary of all string tokens
        seen in the dataset, sorted by occurrence count, with ties broken by
        sort order of the tokens (high to low). At the end of `adapt()`, if
        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
        size. For example, adapting a layer with `max_tokens=1000` will compute
        the 1000 most frequent tokens occurring in the input dataset. If
        `output_mode='tf-idf'`, `adapt()` will also learn the document
        frequencies of each token in the input dataset.

        Arguments:
            data: The data to train on. It can be passed either as a
                batched `tf.data.Dataset`, as a list of strings,
                or as a NumPy array.
            steps: Integer or `None`.
                Total number of steps (batches of samples) to process.
                If `data` is a `tf.data.Dataset`, and `steps` is `None`,
                `adapt()` will run until the input dataset is exhausted.
                When passing an infinitely
                repeating dataset, you must specify the `steps` argument. This
                argument is not supported with array inputs or list inputs.
        """
        super().adapt(data, steps=steps)

    # Overridden methods from IndexLookup.
    def _tensor_vocab_to_numpy(self, vocabulary):
        """Decode a string vocabulary tensor into a NumPy array of `str`."""
        vocabulary = vocabulary.numpy()
        return np.array(
            [tf.compat.as_text(x, self.encoding) for x in vocabulary]
        )

    def get_config(self):
        """Returns the layer config, minus the fixed `vocabulary_dtype`."""
        config = {"encoding": self.encoding}
        base_config = super().get_config()
        # There is only one valid dtype for strings, so we don't expose this.
        del base_config["vocabulary_dtype"]
        return {**base_config, **config}

    def call(self, inputs):
        is_torch_backend = backend.backend() == "torch"
        # Convert non-TF inputs to TF tensors: the parent lookup logic runs
        # on TensorFlow ops regardless of the active backend.
        inputs_for_processing = inputs
        was_tf_input = isinstance(
            inputs, (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)
        )
        if is_torch_backend and isinstance(inputs, torch.Tensor):
            # Torch tensors are detached and moved to CPU before the NumPy
            # round-trip into TF.
            inputs_for_processing = tf.convert_to_tensor(
                inputs.detach().cpu().numpy()
            )
        elif isinstance(inputs, (np.ndarray, list, tuple)):
            inputs_for_processing = tf.convert_to_tensor(inputs)
        elif not was_tf_input:
            # Any other backend tensor goes through NumPy.
            inputs_for_processing = tf.convert_to_tensor(
                backend.convert_to_numpy(inputs)
            )
        output = super().call(inputs_for_processing)
        # Handle torch backend output conversion: return torch tensors for
        # index outputs, decoded Python strings for inverse lookups.
        if is_torch_backend and isinstance(
            inputs, (torch.Tensor, np.ndarray, list, tuple)
        ):
            numpy_outputs = output.numpy()
            if self.invert:
                # Inverse lookup yields bytes; decode each term.
                # NOTE(review): assumes a flat (1D) output - confirm nested
                # outputs are not expected on this path.
                return [n.decode(self.encoding) for n in numpy_outputs]
            else:
                return torch.from_numpy(numpy_outputs)
        # Other backends: convert the TF output back to a backend-native
        # tensor unless the caller handed us TF tensors to begin with.
        if not was_tf_input:
            output = backend_utils.convert_tf_tensor(output)
        return output
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/text_vectorization_test.py | keras/src/layers/preprocessing/text_vectorization_test.py | import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import Sequential
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
class TextVectorizationTest(testing.TestCase, parameterized.TestCase):
    """Tests for `keras.layers.TextVectorization`."""

    # TODO: increase coverage. Most features aren't being tested.

    def test_config(self):
        """Layer round-trips through get_config/from_config serialization."""
        layer = layers.TextVectorization(
            output_mode="int",
            vocabulary=["one", "two"],
            output_sequence_length=5,
        )
        self.run_class_serialization_test(layer)

    def test_adapt_flow(self):
        """`adapt()` learns a vocabulary that the layer then applies."""
        max_tokens = 5000
        max_len = 4
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
        )
        layer.adapt(["foo bar", "bar baz", "baz bada boom"])
        input_data = [["foo qux bar"], ["qux baz"]]
        output = layer(input_data)
        self.assertTrue(backend.is_tensor(output))
        # "qux" is OOV (index 1); sequences are padded with 0 to length 4.
        self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

    def test_fixed_vocabulary(self):
        """A vocabulary passed at construction is used without `adapt()`."""
        max_tokens = 5000
        max_len = 4
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
            vocabulary=["baz", "bar", "foo"],
        )
        input_data = [["foo qux bar"], ["qux baz"]]
        output = layer(input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

    def test_set_vocabulary(self):
        """`set_vocabulary()` after construction behaves like a fixed vocab."""
        max_tokens = 5000
        max_len = 4
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
        )
        layer.set_vocabulary(["baz", "bar", "foo"])
        input_data = [["foo qux bar"], ["qux baz"]]
        output = layer(input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="Requires string input dtype"
    )
    def test_save_load_with_ngrams_flow(self):
        """An adapted n-gram layer survives a save/load round trip."""
        input_data = np.array(["foo bar", "bar baz", "baz bada boom"])
        model = Sequential(
            [
                layers.Input(dtype="string", shape=(1,)),
                layers.TextVectorization(ngrams=(1, 2)),
            ]
        )
        model.layers[0].adapt(input_data)
        output = model(input_data)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(temp_filepath)
        model = saving.load_model(temp_filepath)
        self.assertAllClose(output, model(input_data))

    def test_tf_data_compatibility(self):
        """The layer can be mapped over a `tf.data` pipeline."""
        max_tokens = 5000
        max_len = 4
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
            vocabulary=["baz", "bar", "foo"],
        )
        input_data = [["foo qux bar"], ["qux baz"]]
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
        # Test adapt flow
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
        )
        layer.adapt(input_data)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        next(iter(ds)).numpy()

    @parameterized.named_parameters(
        [
            ("from_ragged", "whitespace"),  # intermediate tensor is ragged
            ("from_dense", None),  # intermediate tensor is dense
        ]
    )
    def test_static_output_sequence_length(self, split):
        """Output length stays statically known inside `tf.data.map`."""
        max_tokens = 5000
        max_len = 4
        layer = layers.TextVectorization(
            max_tokens=max_tokens,
            output_mode="int",
            output_sequence_length=max_len,
            split=split,
            vocabulary=["baz", "bar", "foo"],
        )
        if split:
            input_data = [["foo qux bar"], ["qux baz"]]
        else:
            input_data = [["foo"], ["baz"]]

        def call_layer(x):
            result = layer(x)
            self.assertEqual(result.shape, (None, 4))
            return result

        ds = (
            tf_data.Dataset.from_tensor_slices(input_data)
            .batch(2)
            .map(call_layer)
        )
        next(iter(ds))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="Requires string tensors."
    )
    def test_tf_as_first_sequential_layer(self):
        """The layer can head a Sequential model on the TF backend."""
        layer = layers.TextVectorization(
            max_tokens=10,
            output_mode="int",
            output_sequence_length=3,
        )
        layer.set_vocabulary(["baz", "bar", "foo"])
        model = models.Sequential(
            [
                layer,
                layers.Embedding(5, 4),
            ]
        )
        model(backend.convert_to_tensor([["foo qux bar"], ["qux baz"]]))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="Requires ragged tensors."
    )
    def test_ragged_tensor(self):
        """`ragged=True` yields a RaggedTensor with per-sample lengths."""
        layer = layers.TextVectorization(
            output_mode="int",
            vocabulary=["baz", "bar", "foo"],
            ragged=True,
        )
        input_data = [["foo qux bar"], ["qux baz"], ["foo"]]
        output = layer(input_data)
        self.assertIsInstance(output, tf.RaggedTensor)
        self.assertEqual(output.shape, (3, None))
        self.assertEqual(output.to_list(), [[4, 1, 3], [1, 2], [4]])

    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="Requires ragged tensors."
    )
    def test_ragged_tensor_output_length(self):
        """`output_sequence_length` truncates ragged rows, no padding."""
        layer = layers.TextVectorization(
            output_mode="int",
            vocabulary=["baz", "bar", "foo"],
            ragged=True,
            output_sequence_length=2,
        )
        input_data = [["foo qux bar"], ["qux baz"], ["foo"]]
        output = layer(input_data)
        self.assertIsInstance(output, tf.RaggedTensor)
        self.assertEqual(output.shape, (3, None))
        self.assertEqual(output.to_list(), [[4, 1], [1, 2], [4]])

    @pytest.mark.skipif(
        backend.backend() == "tensorflow",
        reason="Verify raises exception for non-TF backends",
    )
    def test_raises_exception_ragged_tensor(self):
        """`ragged=True` is rejected on backends without ragged support."""
        with self.assertRaises(ValueError):
            _ = layers.TextVectorization(
                output_mode="int",
                vocabulary=["baz", "bar", "foo"],
                ragged=True,
            )

    def test_multi_hot_output(self):
        """`output_mode="multi_hot"` marks vocabulary presence per sample."""
        layer = layers.TextVectorization(
            output_mode="multi_hot", vocabulary=["foo", "bar", "baz"]
        )
        input_data = [["foo bar"], ["baz foo foo"]]
        output = layer(input_data)
        """
        First batch
        Tokens present: ["foo", "bar"]
        For each token in vocabulary:
        foo (index 1): present -> 1
        bar (index 2): present -> 1
        baz (index 3): absent -> 0
        Result: [0, 1, 1, 0]
        Second batch
        Tokens: ["baz", "foo", "foo"]
        For each token in vocabulary:
        foo (index 1): present -> 1
        bar (index 2): absent -> 0
        baz (index 3): present -> 1
        Result: [0, 1, 0, 1]
        """
        self.assertAllClose(output, [[0, 1, 1, 0], [0, 1, 0, 1]])

    def test_output_mode_count_output(self):
        """`output_mode="count"` counts token occurrences per sample."""
        layer = layers.TextVectorization(
            output_mode="count", vocabulary=["foo", "bar", "baz"]
        )
        output = layer(["foo bar", "baz foo foo"])
        self.assertAllClose(output, [[0, 1, 1, 0], [0, 2, 0, 1]])

    def test_output_mode_tf_idf_output(self):
        """`output_mode="tf_idf"` multiplies counts by the idf weights."""
        layer = layers.TextVectorization(
            output_mode="tf_idf",
            vocabulary=["foo", "bar", "baz"],
            idf_weights=[0.3, 0.5, 0.2],
        )
        output = layer(["foo bar", "baz foo foo"])
        # "foo" appears twice in the second sample: 2 * 0.3 = 0.6.
        self.assertAllClose(
            output, [[0.0, 0.3, 0.5, 0.0], [0.0, 0.6, 0.0, 0.2]]
        )

    def test_lower_and_strip_punctuation_standardization(self):
        """Default standardization lowercases and strips punctuation."""
        layer = layers.TextVectorization(
            standardize="lower_and_strip_punctuation",
            vocabulary=["hello", "world", "this", "is", "nice", "test"],
        )
        output = layer(["Hello, World!. This is just a nice test!"])
        self.assertTrue(backend.is_tensor(output))
        # test output sequence length, taking first batch.
        self.assertEqual(len(output[0]), 8)
        self.assertAllEqual(output, [[2, 3, 4, 5, 1, 1, 6, 7]])

    def test_lower_standardization(self):
        """`standardize="lower"` lowercases but keeps punctuation attached."""
        layer = layers.TextVectorization(
            standardize="lower",
            vocabulary=[
                "hello,",
                "hello",
                "world",
                "this",
                "is",
                "nice",
                "test",
            ],
        )
        output = layer(["Hello, World!. This is just a nice test!"])
        self.assertTrue(backend.is_tensor(output))
        self.assertEqual(len(output[0]), 8)
        """
        The input is lowercased and tokenized into words. The vocab is:
        {0: '',
        1: '[UNK]',
        2: 'hello,',
        3: 'hello',
        4: 'world',
        5: 'this',
        6: 'is',
        7: 'nice',
        8: 'test'}
        """
        self.assertAllEqual(output, [[2, 1, 5, 6, 1, 1, 7, 1]])

    def test_char_splitting(self):
        """`split="character"` tokenizes into single characters."""
        layer = layers.TextVectorization(
            split="character", vocabulary=list("abcde"), output_mode="int"
        )
        output = layer(["abcf"])
        self.assertTrue(backend.is_tensor(output))
        self.assertEqual(len(output[0]), 4)
        # "f" is not in the vocabulary, so it maps to the OOV index 1.
        self.assertAllEqual(output, [[2, 3, 4, 1]])

    def test_custom_splitting(self):
        """A callable `split` controls tokenization; unsplit n-grams match."""

        def custom_split(text):
            return tf.strings.split(text, sep="|")

        layer = layers.TextVectorization(
            split=custom_split,
            vocabulary=["foo", "bar", "foobar"],
            output_mode="int",
        )
        output = layer(["foo|bar"])
        self.assertTrue(backend.is_tensor(output))
        # after custom split, the outputted index should be the last
        # token in the vocab.
        self.assertAllEqual(output, [[4]])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/index_lookup.py | keras/src/layers/preprocessing/index_lookup.py | import collections
import numpy as np
from keras.src import backend
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
from keras.src.utils import argument_validation
from keras.src.utils import numerical_utils
from keras.src.utils import tf_utils
from keras.src.utils.module_utils import tensorflow as tf
class IndexLookup(Layer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output
via a table-based lookup, with optional out-of-vocabulary handling. This is
the basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer.
If `None`, there is no cap on the size of the vocabulary.
Note that this size includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are hashed to determine
their OOV value. If this value is 0,
OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs.
When `output_mode` is `"int"`,
the token is included in vocabulary and mapped to index 0.
In other output modes, the token will not appear in the vocabulary
and instances of the mask token in the input will be dropped.
If set to `None`, no mask term will be added.
oov_token: Only used when `invert` is `True`.
The token to return for OOV indices.
vocabulary: Optional. Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D numpy array,
or 1D tensor containing the vocbulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms.
For example, `"int64"` or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D numpy array, or 1D tensor or the same length
as the vocabulary, containing the floating point
inverse document frequency weights, which will be multiplied
by per sample term counts for the final TF-IDF
weight. If the `vocabulary` argument is set, and `output_mode`
is `"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1
at the element index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into
a single array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`, output shape will
be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count
of the number of times the token at that index appeared
in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
Defaults to `"int"`.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have its
feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
`"count"` and `"tf-idf"` output modes.
If `True`, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to `False`.
"""
    def __init__(
        self,
        max_tokens,
        num_oov_indices,
        mask_token,
        oov_token,
        vocabulary_dtype,
        vocabulary=None,
        idf_weights=None,
        invert=False,
        output_mode="int",
        sparse=False,
        pad_to_max_tokens=False,
        name=None,
        **kwargs,
    ):
        # If max_tokens is set, the value must be greater than 1 - otherwise we
        # are creating a 0-element vocab, which doesn't make sense.
        if max_tokens is not None and max_tokens <= 1:
            raise ValueError(
                "If set, `max_tokens` must be greater than 1. "
                f"Received: max_tokens={max_tokens}"
            )
        if pad_to_max_tokens and max_tokens is None:
            raise ValueError(
                "If pad_to_max_tokens is True, must set `max_tokens`. "
                f"Received: max_tokens={max_tokens}"
            )
        if num_oov_indices < 0:
            raise ValueError(
                "`num_oov_indices` must be greater than or equal to 0. "
                f"Received: num_oov_indices={num_oov_indices}"
            )
        # Support deprecated names for output_modes.
        if output_mode == "binary":
            output_mode = "multi_hot"
        if output_mode == "tf-idf":
            output_mode = "tf_idf"
        argument_validation.validate_string_arg(
            output_mode,
            allowable_strings=(
                "int",
                "one_hot",
                "multi_hot",
                "count",
                "tf_idf",
            ),
            caller_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        if invert and output_mode != "int":
            raise ValueError(
                "`output_mode` must be `'int'` when `invert` is true. "
                f"Received: output_mode={output_mode}"
            )
        if sparse and output_mode == "int":
            raise ValueError(
                "`sparse` may only be true if `output_mode` is "
                "`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
                f"Received: sparse={sparse} and "
                f"output_mode={output_mode}"
            )
        if idf_weights is not None and output_mode != "tf_idf":
            raise ValueError(
                "`idf_weights` should only be set if `output_mode` is "
                f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
                f"output_mode={output_mode}"
            )
        super().__init__(name=name)
        # This layer handles its own tensor conversion (strings/ints), so opt
        # out of the base layer's automatic input conversion.
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True
        # Hash-table lookups are not jittable.
        self.supports_jit = False
        self.invert = invert
        self.max_tokens = max_tokens
        self.num_oov_indices = num_oov_indices
        self.mask_token = mask_token
        self.oov_token = oov_token
        self.output_mode = output_mode
        self.sparse = sparse
        self.pad_to_max_tokens = pad_to_max_tokens
        # Canonicalize the dtype to its TF name (e.g. "string", "int64").
        self.vocabulary_dtype = tf.as_dtype(vocabulary_dtype).name
        self._frozen_vocab_size = kwargs.pop("vocabulary_size", None)
        # Remember original `vocabulary` as `input_vocabulary` for serialization
        # via `get_config`. However, if `vocabulary` is a file path or a URL, we
        # serialize the vocabulary as an asset and clear the original path/URL.
        self.input_vocabulary = (
            vocabulary if not isinstance(vocabulary, str) else None
        )
        self.input_idf_weights = idf_weights
        # We set this hidden attr to
        # persist the fact that we have a non-adaptable layer with a
        # manually set vocab.
        self._has_input_vocabulary = kwargs.pop(
            "has_input_vocabulary", (vocabulary is not None)
        )
        kwargs.pop("trainable", None)
        kwargs.pop("dtype", None)
        if kwargs:
            raise ValueError(f"Unrecognized keyword argument(s): {kwargs}")
        # Key/value dtypes of the lookup table depend on the direction of the
        # mapping: inverted layers map int64 indices back to vocabulary terms.
        if invert:
            self._key_dtype = "int64"
            self._value_dtype = self.vocabulary_dtype
            mask_key = 0
            mask_value = mask_token
            self._default_value = self.oov_token
        else:
            self._key_dtype = self.vocabulary_dtype
            self._value_dtype = "int64"
            mask_key = mask_token
            # Masks should map to 0 for int output and be dropped otherwise. Max
            # ints will be dropped from the bincount op.
            mask_value = (
                0
                if self.output_mode == "int"
                else tf.as_dtype(self._value_dtype).max
            )
            if self.num_oov_indices == 0:
                # If there are no OOV indices, we map OOV tokens to -1 and error
                # out during call if we find a negative index.
                self._default_value = -1
            elif self.num_oov_indices == 1:
                # If there is only one OOV index, we can set that index as the
                # default value of the index_lookup table.
                self._default_value = self._oov_start_index()
            else:
                # If we have multiple OOV values, we need to do a further
                # hashing step; to make this easier, we set the OOV value to -1.
                # (This lets us do a vectorized add and cast to boolean to
                # determine locations where we need to do extra hashing.)
                self._default_value = -1
        if self.mask_token is not None:
            self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
            self._mask_value = tf.convert_to_tensor(
                mask_value, self._value_dtype
            )
        if self.output_mode == "tf_idf":
            if self._has_input_vocabulary and idf_weights is None:
                raise ValueError(
                    "When specifying the `vocabulary` argument, "
                    "in TF-IDF output mode, the `idf_weights` argument "
                    "must also be provided."
                )
            if idf_weights is not None:
                self.idf_weights = tf.Variable(
                    idf_weights,
                    dtype=backend.floatx(),
                    trainable=False,
                )
                # Snapshot used by `call`; refreshed whenever weights change.
                self.idf_weights_const = self.idf_weights.value()
        if vocabulary is not None:
            self.set_vocabulary(vocabulary, idf_weights)
        else:
            # When restoring from a keras SavedModel, the loading code will
            # expect to find and restore a lookup_table attribute on the layer.
            # This table needs to be uninitialized as a StaticHashTable cannot
            # be initialized twice.
            self.lookup_table = self._uninitialized_lookup_table()
        # Only set up adapt state if we did not receive a vocab on construction.
        if not self._has_input_vocabulary:
            # Set adapt state.
            self.token_counts = tf.lookup.experimental.MutableHashTable(
                key_dtype=vocabulary_dtype,
                value_dtype="int64",
                default_value=0,
            )
            if self.output_mode == "tf_idf":
                # Per-token document counts and total document count are
                # needed to derive idf weights during `finalize_state`.
                self.token_document_counts = (
                    tf.lookup.experimental.MutableHashTable(
                        key_dtype=vocabulary_dtype,
                        value_dtype="int64",
                        default_value=0,
                    )
                )
                self.num_documents = tf.Variable(
                    0, dtype="int64", trainable=False
                )
    def get_vocabulary(self, include_special_tokens=True):
        """Returns the current vocabulary of the layer.
        Args:
            include_special_tokens: If `True`, the returned vocabulary
                will include mask and OOV tokens,
                and a term's index in the vocabulary
                will equal the term's index when calling the layer.
                If `False`, the returned vocabulary will not include
                any mask or OOV tokens.
        Returns:
            A list of vocabulary terms, ordered by index; byte strings are
            decoded to `str` when the vocabulary dtype is string.
        """
        # The lookup table data will not be sorted, so we will create an
        # inverted lookup here, and use that to lookup a range of indices
        # [0, vocab_size).
        if self.lookup_table.size() == 0:
            vocab, indices = [], []
        else:
            keys, values = self.lookup_table.export()
            vocab, indices = (values, keys) if self.invert else (keys, values)
            vocab, indices = (
                self._tensor_vocab_to_numpy(vocab),
                indices.numpy(),
            )
        # Indices with no table entry fall back to the OOV token, so holes in
        # the index range still produce a well-formed vocabulary list.
        lookup = collections.defaultdict(
            lambda: self.oov_token, zip(indices, vocab)
        )
        vocab = [lookup[x] for x in range(self.vocabulary_size())]
        if self.mask_token is not None and self.output_mode == "int":
            # Index 0 is reserved for the mask token in "int" mode.
            vocab[0] = self.mask_token
        if not include_special_tokens:
            vocab = vocab[self._token_start_index() :]
        if self.vocabulary_dtype == "string":
            return [
                i.decode("utf-8") if isinstance(i, bytes) else i for i in vocab
            ]
        else:
            return vocab
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional mask and oov
indices.
"""
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.lookup_table.size() + self._token_start_index()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"sparse": self.sparse,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary_dtype": self.vocabulary_dtype,
"idf_weights": listify_tensors(self.input_idf_weights),
"vocabulary": listify_tensors(self.input_vocabulary),
"vocabulary_size": self._frozen_vocab_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
    def _record_vocabulary_size(self):
        # Freeze the current vocabulary size so output shapes stay static
        # after the vocab is set/adapted; raises if the size changed since it
        # was last frozen.
        self._ensure_vocab_size_unchanged()
        with tf.init_scope():
            # Read the size eagerly, outside any tf.function trace.
            self._frozen_vocab_size = self.vocabulary_size()
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is already present in the layer, this
method will replace it.
Args:
vocabulary: Either an array or a string path to a text file.
If passing an array, can pass a tuple, list,
1D numpy array, or 1D tensor containing the vocbulary terms.
If passing a file path, the file should contain one line
per term in the vocabulary.
idf_weights: A tuple, list, 1D numpy array, or 1D tensor
of inverse document frequency weights with equal
length to vocabulary. Must be set if `output_mode`
is `"tf_idf"`. Should not be set otherwise.
"""
if self.output_mode == "tf_idf":
if idf_weights is None:
raise ValueError(
"`idf_weights` must be set if output_mode is 'tf_idf'."
)
elif idf_weights is not None:
raise ValueError(
"`idf_weights` should only be set if output_mode is "
f"`'tf_idf'`. Received: output_mode={self.output_mode} "
f"and idf_weights={idf_weights}"
)
if isinstance(vocabulary, str):
if serialization_lib.in_safe_mode():
raise ValueError(
"Requested the loading of a vocabulary file outside of the "
"model archive. This carries a potential risk of loading "
"arbitrary and sensitive files and thus it is disallowed "
"by default. If you trust the source of the artifact, you "
"can override this error by passing `safe_mode=False` to "
"the loading function, or calling "
"`keras.config.enable_unsafe_deserialization(). "
f"Vocabulary file: '{vocabulary}'"
)
if not tf.io.gfile.exists(vocabulary):
raise ValueError(
f"Vocabulary file {vocabulary} does not exist."
)
if self.output_mode == "tf_idf":
raise ValueError(
"output_mode `'tf_idf'` does not support loading a "
"vocabulary from file."
)
self.lookup_table = self._lookup_table_from_file(vocabulary)
self._record_vocabulary_size()
return
if not tf.executing_eagerly() and (
tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)
):
raise RuntimeError(
f"Cannot set a tensor vocabulary on layer {self.name} "
"when not executing eagerly. "
"Create this layer or call `set_vocabulary()` "
"outside of any traced function."
)
# TODO(mattdangerw): for better performance we should rewrite this
# entire function to operate on tensors and convert vocabulary to a
# tensor here.
if tf.is_tensor(vocabulary):
vocabulary = self._tensor_vocab_to_numpy(vocabulary)
elif isinstance(vocabulary, (list, tuple)):
vocabulary = np.array(vocabulary)
if tf.is_tensor(idf_weights):
idf_weights = idf_weights.numpy()
elif isinstance(idf_weights, (list, tuple)):
idf_weights = np.array(idf_weights)
if vocabulary.size == 0:
raise ValueError(
"Cannot set an empty vocabulary. "
f"Received: vocabulary={vocabulary}"
)
oov_start = self._oov_start_index()
token_start = self._token_start_index()
special_tokens = [self.mask_token] * oov_start + [
self.oov_token
] * self.num_oov_indices
found_special_tokens = np.array_equal(
special_tokens, vocabulary[:token_start]
)
if found_special_tokens:
tokens = vocabulary[token_start:]
else:
tokens = vocabulary
repeated_tokens = self._find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError(
"The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
f"are: {repeated_tokens}"
)
if self.mask_token is not None and self.mask_token in tokens:
mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
raise ValueError(
"Found reserved mask token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: mask_token={self.mask_token} at "
f"vocabulary index {mask_index}"
)
# Only error out for oov_token when invert=True. When invert=False,
# oov_token is unused during lookup.
if (
self.oov_token is not None
and self.invert
and self.oov_token in tokens
):
oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
raise ValueError(
"Found reserved OOV token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: oov_token={self.oov_token} at "
f"vocabulary index {oov_index}"
)
new_vocab_size = token_start + len(tokens)
if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab "
f"size. Received vocabulary size is {new_vocab_size}; "
f"`max_tokens` is {self.max_tokens}."
)
self.lookup_table = self._lookup_table_from_tokens(tokens)
self._record_vocabulary_size()
if self.output_mode == "tf_idf" and idf_weights is not None:
if len(vocabulary) != len(idf_weights):
raise ValueError(
"`idf_weights` must be the same length as vocabulary. "
f"len(idf_weights) is {len(idf_weights)}; "
f"len(vocabulary) is {len(vocabulary)}"
)
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array. "
f"Received: type(idf_weights)={type(idf_weights)}"
)
# If the passed vocabulary has no special tokens, we need to pad the
# front of idf_weights. We don't have real document frequencies for
# these tokens so we will use an average of all idf_weights passed
# in as a reasonable default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our
# total vocab size, we need to pad the back of idf_weights with
# zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = (
self.max_tokens - front_padding - len(idf_weights)
)
else:
back_padding = 0
weights = np.pad(
idf_weights,
(front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value),
)
weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
self.idf_weights = tf.Variable(
weights,
trainable=False,
)
self.idf_weights_const = self.idf_weights.value()
    def get_build_config(self):
        # All state (lookup tables, variables) is created eagerly in
        # `__init__`/`set_vocabulary`, so there is no build state to serialize.
        return {}
    def build_from_config(self, config):
        # No shape-dependent state; `build(None)` just marks the layer built.
        self.build(None)
    @property
    def compute_dtype(self):
        # Lookups operate on the vocabulary dtype (e.g. string/int64), not on
        # the floating-point compute dtype of a typical layer.
        return self.vocabulary_dtype
    @property
    def variable_dtype(self):
        # Variables (the lookup table contents) share the vocabulary dtype.
        return self.vocabulary_dtype
def compute_output_shape(self, input_shape):
if self.output_mode == "int":
return input_shape
depth = (
self.max_tokens
if self.pad_to_max_tokens
else self._frozen_vocab_size
)
return (input_shape[0], depth)
def compute_output_spec(self, inputs):
if self.output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
output_shape = self.compute_output_shape(inputs.shape)
return backend.KerasTensor(output_shape, dtype=output_dtype)
def adapt(self, data, steps=None):
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
if data.shape.rank == 1:
# A plain list of strings
# is treated as as many documents
data = tf.expand_dims(data, -1)
self.update_state(data)
self.finalize_state()
    def update_state(self, data):
        """Accumulates token (and, for tf_idf, document) counts from `data`."""
        if self._has_input_vocabulary:
            raise ValueError(
                f"Cannot adapt layer '{self.name}' after setting a static "
                "vocabulary via `vocabulary` argument or "
                "`set_vocabulary()` method."
            )
        data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
        # Uprank scalars to rank 1 so the rank-1 handling below applies.
        if data.shape.rank == 0:
            data = tf.expand_dims(data, 0)
        if data.shape.rank == 1:
            # Expand dims on axis 0 for tf-idf. A 1-d tensor
            # is a single document.
            data = tf.expand_dims(data, 0)
        tokens, counts = self._num_tokens(data)
        # Read-modify-write: add the new counts onto any existing entries.
        self.token_counts.insert(
            tokens, counts + self.token_counts.lookup(tokens)
        )
        if self.output_mode == "tf_idf":
            # Dedupe each row of our dataset.
            if isinstance(data, tf.RaggedTensor):
                deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
            else:
                deduped_doc_data = [tf.unique(x)[0] for x in data]
                deduped_doc_data = tf.concat(deduped_doc_data, axis=0)
            # Flatten and count tokens.
            tokens, counts = self._num_tokens(deduped_doc_data)
            # After dedup, each token counts at most once per document.
            self.token_document_counts.insert(
                tokens, counts + self.token_document_counts.lookup(tokens)
            )
            # Track the total document count for idf computation.
            if isinstance(data, tf.RaggedTensor):
                self.num_documents.assign_add(data.nrows())
            else:
                self.num_documents.assign_add(
                    tf.shape(data, out_type="int64")[0]
                )
    def finalize_state(self):
        """Builds the final vocabulary (and idf weights) from adapt state."""
        if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
            # Finalize idf_weights to a const for call even if we don't need to
            # compute a new vocabulary.
            if self.output_mode == "tf_idf":
                self.idf_weights_const = self.idf_weights.value()
            self._record_vocabulary_size()
            return
        # Remove special tokens from our counts.
        if self.mask_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype)
            )
        if self.oov_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype)
            )
        tokens, counts = self.token_counts.export()
        # To keep vocabs deterministic, we sort our tokens by count and break
        # ties by sorting the tokens themselves. Tensorflow has no ops for
        # sorting strings, so we need to use numpy for the sort.
        sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
        token_start = self._token_start_index()
        if self.max_tokens:
            # Keep only the most frequent tokens that fit under max_tokens
            # after reserving the special-token slots.
            max_learned_tokens = self.max_tokens - token_start
            sorted_indices = sorted_indices[:max_learned_tokens]
        tokens = tf.gather(tokens, sorted_indices)
        self.lookup_table = self._lookup_table_from_tokens(tokens)
        if self.output_mode == "tf_idf":
            token_document_counts = self.token_document_counts.lookup(tokens)
            idf_weights = self._inverse_document_frequency(
                token_document_counts, self.num_documents
            )
            idf_weights = tf.cast(idf_weights, backend.floatx())
            # Pad the front of idf_weights with the average idf weight for OOV
            # tokens. We cannot compute the real idf weight of OOV in a single
            # pass.
            idf_weights = tf.pad(
                idf_weights,
                [[self._token_start_index(), 0]],
                constant_values=tf.reduce_mean(idf_weights),
            )
            if self.pad_to_max_tokens and self.max_tokens is not None:
                # Pad the back of idf_weights with zeros.
                idf_weights = tf.pad(
                    idf_weights,
                    [[0, self.max_tokens - tf.size(idf_weights)]],
                    constant_values=0,
                )
            self.idf_weights = tf.Variable(
                idf_weights,
                dtype=backend.floatx(),
                trainable=False,
            )
            self.idf_weights_const = self.idf_weights.value()
        # We call this here to save memory, now that we've built our vocabulary,
        # we don't want to keep every token we've seen in separate lookup
        # tables.
        self.reset_state()
        self._record_vocabulary_size()
def reset_state(self):
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == "tf_idf":
self.token_document_counts.remove(
self.token_document_counts.export()[0]
)
self.num_documents.assign(0)
def call(self, inputs):
from keras.src.backend import tensorflow as tf_backend
self._ensure_known_vocab_size()
inputs = tf_utils.ensure_tensor(inputs, dtype=self._key_dtype)
original_shape = inputs.shape
# Some ops will not handle scalar input, so uprank to rank 1.
if inputs.shape.rank == 0:
inputs = self._expand_dims(inputs, -1)
if isinstance(inputs, tf.SparseTensor):
lookups = tf.SparseTensor(
inputs.indices,
self._lookup_dense(inputs.values),
inputs.dense_shape,
)
elif isinstance(inputs, tf.RaggedTensor):
lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
else:
lookups = self._lookup_dense(inputs)
if self.output_mode == "int":
# If we received a scalar input, downrank back to a scalar.
if original_shape.rank == 0:
lookups = tf.squeeze(lookups, -1)
return lookups
depth = (
self.max_tokens
if self.pad_to_max_tokens
else self._frozen_vocab_size
)
idf_weights = (
self.idf_weights_const if self.output_mode == "tf_idf" else None
)
output = numerical_utils.encode_categorical_inputs(
lookups,
output_mode=(
"count" if self.output_mode == "tf_idf" else self.output_mode
),
depth=depth,
dtype=self._value_dtype,
sparse=self.sparse,
backend_module=tf_backend,
)
if self.output_mode == "tf_idf":
if idf_weights is None:
raise ValueError(
"When `output_mode` is `'tf_idf'`, `idf_weights` must be "
"provided."
)
output = tf_backend.numpy.multiply(
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
import math
import warnings
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.utils.module_utils import scipy
@keras_export("keras.layers.STFTSpectrogram")
class STFTSpectrogram(layers.Layer):
"""Layer to compute the Short-Time Fourier Transform (STFT) on a 1D signal.
A layer that computes Spectrograms of the input signal to produce
a spectrogram. This layers utilizes Short-Time Fourier Transform (STFT) by
The layer computes Spectrograms based on STFT by utilizing convolution
kernels, which allows parallelization on GPUs and trainable kernels for
fine-tuning support. This layer allows different modes of output
(e.g., log-scaled magnitude, phase, power spectral density, etc.) and
provides flexibility in windowing, padding, and scaling options for the
STFT calculation.
Examples:
Apply it as a non-trainable preprocessing layer on 3 audio tracks of
1 channel, 10 seconds and sampled at 16 kHz.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128, # 50% overlap
... fft_length=512,
... window="hann",
... padding="valid",
... trainable=False, # non-trainable, preprocessing only
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 1))).shape
(3, 1249, 257)
Apply it as a trainable processing layer on 3 stereo audio tracks of
2 channels, 10 seconds and sampled at 16 kHz. This is initialized as the
non-trainable layer, but then can be trained jointly within a model.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128, # 50% overlap
... fft_length=512,
... window="hamming", # hamming windowing function
... padding="same", # padding to preserve the time dimension
... trainable=True, # trainable, this is the default in keras
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 2))).shape
(3, 1250, 514)
Similar to the last example, but add an extra dimension so the output is
an image to be used with image models. We apply this here on a signal of
3 input channels to output an image tensor, hence is directly applicable
with an image model.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128,
... fft_length=512,
... padding="same",
... expand_dims=True, # this adds the extra dimension
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 3))).shape
(3, 1250, 257, 3)
Args:
mode: String, the output type of the spectrogram. Can be one of
`"log"`, `"magnitude`", `"psd"`, `"real`", `"imag`", `"angle`",
`"stft`". Defaults to `"log`".
frame_length: Integer, The length of each frame (window) for STFT in
samples. Defaults to 256.
frame_step: Integer, the step size (hop length) between
consecutive frames. If not provided, defaults to half the
frame_length. Defaults to `frame_length // 2`.
fft_length: Integer, the size of frequency bins used in the Fast-Fourier
Transform (FFT) to apply to each frame. Should be greater than or
equal to `frame_length`. Recommended to be a power of two. Defaults
to the smallest power of two that is greater than or equal
to `frame_length`.
window: (String or array_like), the windowing function to apply to each
frame. Can be `"hann`" (default), `"hamming`", or a custom window
provided as an array_like.
periodic: Boolean, if True, the window function will be treated as
periodic. Defaults to `False`.
scaling: String, type of scaling applied to the window. Can be
`"density`", `"spectrum`", or None. Default is `"density`".
padding: String, padding strategy. Can be `"valid`" or `"same`".
Defaults to `"valid"`.
expand_dims: Boolean, if True, will expand the output into spectrograms
into two dimensions to be compatible with image models.
Defaults to `False`.
data_format: String, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, weight)`. Defaults to `"channels_last"`.
Raises:
ValueError: If an invalid value is provided for `"mode`", `"scaling`",
`"padding`", or other input arguments.
TypeError: If the input data type is not one of `"float16`",
`"float32`", or `"float64`".
Input shape:
A 3D tensor of shape `(batch_size, time_length, input_channels)`, if
`data_format=="channels_last"`, and of shape
`(batch_size, input_channels, time_length)` if
`data_format=="channels_first"`, where `time_length` is the length of
the input signal, and `input_channels` is the number of input channels.
The same kernels are applied to each channel independently.
Output shape:
If `data_format=="channels_first" and not expand_dims`, a 3D tensor:
`(batch_size, input_channels * freq_channels, new_time_length)`
If `data_format=="channels_last" and not expand_dims`, a 3D tensor:
`(batch_size, new_time_length, input_channels * freq_channels)`
If `data_format=="channels_first" and expand_dims`, a 4D tensor:
`(batch_size, input_channels, new_time_length, freq_channels)`
If `data_format=="channels_last" and expand_dims`, a 4D tensor:
`(batch_size, new_time_length, freq_channels, input_channels)`
where `new_time_length` depends on the padding, and `freq_channels` is
the number of FFT bins `(fft_length // 2 + 1)`.
"""
def __init__(
self,
mode="log",
frame_length=256,
frame_step=None,
fft_length=None,
window="hann",
periodic=False,
scaling="density",
padding="valid",
expand_dims=False,
data_format=None,
**kwargs,
):
if frame_step is not None and (
frame_step > frame_length or frame_step < 1
):
raise ValueError(
"`frame_step` should be a positive integer not greater than "
f"`frame_length`. Received frame_step={frame_step}, "
f"frame_length={frame_length}"
)
if fft_length is not None and fft_length < frame_length:
raise ValueError(
"`fft_length` should be not less than `frame_length`. "
f"Received fft_length={fft_length}, frame_length={frame_length}"
)
if fft_length is not None and (fft_length & -fft_length) != fft_length:
warnings.warn(
"`fft_length` is recommended to be a power of two. "
f"Received fft_length={fft_length}"
)
all_modes = ["log", "magnitude", "psd", "real", "imag", "angle", "stft"]
if mode not in all_modes:
raise ValueError(
"Output mode is invalid, it must be one of "
f"{', '.join(all_modes)}. Received: mode={mode}"
)
if scaling is not None and scaling not in ["density", "spectrum"]:
raise ValueError(
"Scaling is invalid, it must be `None`, 'density' "
f"or 'spectrum'. Received scaling={scaling}"
)
if padding not in ["valid", "same"]:
raise ValueError(
"Padding is invalid, it should be 'valid', 'same'. "
f"Received: padding={padding}"
)
if isinstance(window, str):
# throws an exception for invalid window function
scipy.signal.get_window(window, 1)
super().__init__(**kwargs)
self.mode = mode
self.frame_length = frame_length
self.frame_step = frame_step
self._frame_step = frame_step or self.frame_length // 2
self.fft_length = fft_length
self._fft_length = fft_length or (
2 ** int(math.ceil(math.log2(frame_length)))
)
self.window = window
self.periodic = periodic
self.scaling = scaling
self.padding = padding
self.expand_dims = expand_dims
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = layers.input_spec.InputSpec(ndim=3)
def build(self, input_shape):
shape = (self.frame_length, 1, self._fft_length // 2 + 1)
if self.mode != "imag":
self.real_kernel = self.add_weight(
name="real_kernel",
shape=shape,
initializer=initializers.STFT(
"real", self.window, self.scaling, self.periodic
),
)
if self.mode != "real":
self.imag_kernel = self.add_weight(
name="imag_kernel",
shape=shape,
initializer=initializers.STFT(
"imag", self.window, self.scaling, self.periodic
),
)
def _adjust_shapes(self, outputs):
_, channels, freq_channels, time_seq = ops.shape(outputs)
batch_size = -1
if self.data_format == "channels_last":
if self.expand_dims:
outputs = ops.transpose(outputs, [0, 3, 2, 1])
# [batch_size, time_seq, freq_channels, input_channels]
else:
outputs = ops.reshape(
outputs,
[batch_size, channels * freq_channels, time_seq],
)
# [batch_size, input_channels * freq_channels, time_seq]
outputs = ops.transpose(outputs, [0, 2, 1])
else:
if self.expand_dims:
outputs = ops.transpose(outputs, [0, 1, 3, 2])
# [batch_size, channels, time_seq, freq_channels]
else:
outputs = ops.reshape(
outputs,
[batch_size, channels * freq_channels, time_seq],
)
return outputs
def _apply_conv(self, inputs, kernel):
if self.data_format == "channels_last":
_, time_seq, channels = ops.shape(inputs)
inputs = ops.transpose(inputs, [0, 2, 1])
inputs = ops.reshape(inputs, [-1, time_seq, 1])
else:
_, channels, time_seq = ops.shape(inputs)
inputs = ops.reshape(inputs, [-1, 1, time_seq])
outputs = ops.conv(
inputs,
ops.cast(kernel, backend.standardize_dtype(inputs.dtype)),
padding=self.padding,
strides=self._frame_step,
data_format=self.data_format,
)
batch_size = -1
if self.data_format == "channels_last":
_, time_seq, freq_channels = ops.shape(outputs)
outputs = ops.transpose(outputs, [0, 2, 1])
outputs = ops.reshape(
outputs,
[batch_size, channels, freq_channels, time_seq],
)
else:
_, freq_channels, time_seq = ops.shape(outputs)
outputs = ops.reshape(
outputs,
[batch_size, channels, freq_channels, time_seq],
)
return outputs
def call(self, inputs):
dtype = inputs.dtype
if backend.standardize_dtype(dtype) not in {
"float16",
"float32",
"float64",
}:
raise TypeError(
"Invalid input type. Expected `float16`, `float32` or "
f"`float64`. Received: input type={dtype}"
)
real_signal = None
imag_signal = None
power = None
if self.mode != "imag":
real_signal = self._apply_conv(inputs, self.real_kernel)
if self.mode != "real":
imag_signal = self._apply_conv(inputs, self.imag_kernel)
if self.mode == "real":
return self._adjust_shapes(real_signal)
elif self.mode == "imag":
return self._adjust_shapes(imag_signal)
elif self.mode == "angle":
return self._adjust_shapes(ops.arctan2(imag_signal, real_signal))
elif self.mode == "stft":
return self._adjust_shapes(
ops.concatenate([real_signal, imag_signal], axis=2)
)
else:
power = ops.square(real_signal) + ops.square(imag_signal)
if self.mode == "psd":
return self._adjust_shapes(
power
+ ops.pad(
power[:, :, 1:-1, :], [[0, 0], [0, 0], [1, 1], [0, 0]]
)
)
linear_stft = self._adjust_shapes(
ops.sqrt(ops.maximum(power, backend.epsilon()))
)
if self.mode == "magnitude":
return linear_stft
else:
return ops.log(ops.maximum(linear_stft, backend.epsilon()))
def compute_output_shape(self, input_shape):
if self.data_format == "channels_last":
channels = input_shape[-1]
else:
channels = input_shape[1]
freq_channels = self._fft_length // 2 + 1
if self.mode == "stft":
freq_channels *= 2
shape = ops.operation_utils.compute_conv_output_shape(
input_shape,
freq_channels * channels,
(self.frame_length,),
strides=self._frame_step,
padding=self.padding,
data_format=self.data_format,
)
if self.data_format == "channels_last":
batch_size, time_seq, _ = shape
else:
batch_size, _, time_seq = shape
if self.expand_dims:
if self.data_format == "channels_last":
return (batch_size, time_seq, freq_channels, channels)
else:
return (batch_size, channels, time_seq, freq_channels)
return shape
def get_config(self):
config = super().get_config()
config.update(
{
"mode": self.mode,
"frame_length": self.frame_length,
"frame_step": self.frame_step,
"fft_length": self.fft_length,
"window": self.window,
"periodic": self.periodic,
"scaling": self.scaling,
"padding": self.padding,
"data_format": self.data_format,
"expand_dims": self.expand_dims,
}
)
return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/integer_lookup_test.py | keras/src/layers/preprocessing/integer_lookup_test.py | import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IntegerLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.IntegerLookup(
output_mode="int",
vocabulary=[1, 2, 3],
oov_token=1,
mask_token=0,
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
adapt_data = [1, 1, 1, 2, 2, 3]
single_sample_input_data = [1, 2, 4]
batch_input_data = [[1, 2, 4], [2, 3, 5]]
# int mode
layer = layers.IntegerLookup(
output_mode="int",
)
layer.adapt(adapt_data)
output = layer(single_sample_input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([1, 2, 0]))
output = layer(batch_input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1, 2, 0], [2, 3, 0]]))
# one_hot mode
layer = layers.IntegerLookup(
output_mode="one_hot",
)
layer.adapt(adapt_data)
output = layer(single_sample_input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(
output, np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]])
)
# multi_hot mode
layer = layers.IntegerLookup(
output_mode="multi_hot",
)
layer.adapt(adapt_data)
output = layer(single_sample_input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([1, 1, 1, 0]))
# tf_idf mode
layer = layers.IntegerLookup(
output_mode="tf_idf",
)
layer.adapt(adapt_data)
output = layer(single_sample_input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(
output, np.array([1.133732, 0.916291, 1.098612, 0.0])
)
# count mode
layer = layers.IntegerLookup(
output_mode="count",
)
layer.adapt(adapt_data)
output = layer([1, 2, 3, 4, 1, 2, 1])
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([1, 3, 2, 1]))
def test_fixed_vocabulary(self):
layer = layers.IntegerLookup(
output_mode="int",
vocabulary=[1, 2, 3, 4],
)
input_data = [2, 3, 4, 5]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 4, 0]))
def test_set_vocabulary(self):
layer = layers.IntegerLookup(
output_mode="int",
)
layer.set_vocabulary([1, 2, 3, 4])
input_data = [2, 3, 4, 5]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 4, 0]))
def test_tf_data_compatibility(self):
layer = layers.IntegerLookup(
output_mode="int",
vocabulary=[1, 2, 3, 4],
)
input_data = [2, 3, 4, 5]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 4, 0]))
def test_one_hot_output_with_higher_rank_input(self):
input_data = np.array([[1, 2], [3, 0]])
vocabulary = [1, 2, 3]
layer = layers.IntegerLookup(
vocabulary=vocabulary, output_mode="one_hot"
)
output_data = layer(input_data)
self.assertEqual(output_data.shape, (2, 2, 4))
expected_output = np.array(
[
[[0, 1, 0, 0], [0, 0, 1, 0]],
[[0, 0, 0, 1], [1, 0, 0, 0]],
]
)
self.assertAllClose(output_data, expected_output)
output_data_3d = layer(np.expand_dims(input_data, axis=0))
self.assertEqual(output_data_3d.shape, (1, 2, 2, 4))
self.assertAllClose(
output_data_3d, np.expand_dims(expected_output, axis=0)
)
def test_multi_hot_output_shape(self):
input_data = np.array([[1, 2], [3, 0]])
vocabulary = [1, 2, 3]
layer = layers.IntegerLookup(
vocabulary=vocabulary, output_mode="multi_hot"
)
output_data = layer(input_data)
self.assertEqual(output_data.shape, (2, 4))
def test_count_output_shape(self):
input_data = np.array([[1, 2], [3, 0]])
vocabulary = [1, 2, 3]
layer = layers.IntegerLookup(vocabulary=vocabulary, output_mode="count")
output_data = layer(input_data)
self.assertEqual(output_data.shape, (2, 4))
def test_tf_idf_output_shape(self):
input_data = np.array([[1, 2], [3, 0]])
vocabulary = [1, 2, 3]
idf_weights = [1.0, 1.0, 1.0]
layer = layers.IntegerLookup(
vocabulary=vocabulary,
idf_weights=idf_weights,
output_mode="tf_idf",
)
output_data = layer(input_data)
self.assertEqual(output_data.shape, (2, 4))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/feature_space_test.py | keras/src/layers/preprocessing/feature_space_test.py | import os
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing import feature_space
from keras.src.saving import saving_api
class FeatureSpaceTest(testing.TestCase):
def _get_train_data_dict(
self,
as_dataset=False,
as_tensors=False,
as_labeled_dataset=False,
include_strings=True,
):
data = {
"float_1": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_2": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_3": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_2": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_3": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
if include_strings:
data["string_1"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
data["string_2"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
if as_dataset:
return tf_data.Dataset.from_tensor_slices(data)
elif as_tensors:
return {
key: ops.convert_to_tensor(value) for key, value in data.items()
}
elif as_labeled_dataset:
labels = [0, 1, 0, 1, 0, 0, 1, 0, 1, 1]
return tf_data.Dataset.from_tensor_slices((data, labels))
return data
def test_basic_usage_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2"), ("int_2", "int_3")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test batched adapt
fs.adapt(
self._get_train_data_dict(
as_dataset=True, include_strings=False
).batch(4)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
out_dim = 152
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on backend tensors
data = self._get_train_data_dict(as_tensors=True, include_strings=False)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2")],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["int_1_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints_no_strings(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("int_1", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["int_1_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_1_X_int_2"].dtype).startswith(
"int"
)
)
def test_basic_usage(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test batched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True).batch(4))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
out_dim = 195
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on tensors
if backend.backend() == "tensorflow":
data = self._get_train_data_dict(as_tensors=True)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on tensors
if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (11,))
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["string_2_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
# Test batched call on tensors
if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": cls.string_categorical(output_mode="int"),
"string_2": cls.string_hashed(num_bins=32, output_mode="int"),
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("float_3", "string_1"), output_mode="int", crossing_dim=32
),
cls.cross(
("string_2", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_1"].dtype).startswith("int")
)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["string_2_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_2_X_int_2"].dtype).startswith(
"int"
)
)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string dtype."
)
def test_functional_api_sync_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
inputs = fs.get_inputs()
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=inputs, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(as_labeled_dataset=True)
model.fit(ds.batch(4))
model.evaluate(ds.batch(4))
ds = self._get_train_data_dict(as_dataset=True)
model.predict(ds.batch(4))
@pytest.mark.requires_trainable_backend
def test_tf_data_async_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "int_1"), ("int_1", "int_2")],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=features, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
# Try map before batch
ds = ds.map(lambda x, y: (fs(x), y))
model.fit(ds.batch(4))
# Try map after batch
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
ds = ds.batch(4)
ds = ds.map(lambda x, y: (fs(x), y))
model.evaluate(ds)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
ds = ds.map(fs)
model.predict(ds.batch(4))
def test_advanced_usage(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"string_1": cls.string_categorical(max_tokens=5),
"string_2": cls.string_hashed(num_bins=32),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "string_1"), crossing_dim=32),
cls.cross(("string_2", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertEqual(out.shape, (148,))
def test_manual_kpl(self):
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
cls = feature_space.FeatureSpace
# Test with a tf-idf TextVectorization layer
tv = layers.TextVectorization(output_mode="tf_idf")
fs = feature_space.FeatureSpace(
features={
"text": cls.feature(
preprocessor=tv, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
fs.adapt(tf_data.Dataset.from_tensor_slices(data))
out = fs(data)
self.assertEqual(list(out.shape), [3, 5])
def test_no_adapt(self):
data = {
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"text_1": [
"This is",
"not just",
"an example",
"of random words.",
"these are",
"some words",
"in",
"a random",
"example.",
"Bye!",
],
"float_1": [
-1.2,
0.0,
2.4,
1.2,
15.0,
-100.0,
23.1,
3.12,
0.1,
-0.01,
],
}
cls = feature_space.FeatureSpace
# Pre-defined vocabulary. No need to adapt.
tv_vocab = [
"this",
"is",
"just",
"an",
"example",
"with",
"some",
"words",
]
tv_with_vocab = layers.TextVectorization(
vocabulary=tv_vocab, output_mode="int", output_sequence_length=3
)
# Pre-defined mean and variance. No need to adapt.
mean, variance = 12.0, 5.0
normalization = layers.Normalization(mean=mean, variance=variance)
fs = feature_space.FeatureSpace(
{
"int_1": "integer_hashed",
"text_1": cls.feature(
dtype="string",
preprocessor=tv_with_vocab,
output_mode="int",
),
"float_1": cls.feature(
dtype="float32",
preprocessor=normalization,
output_mode="float",
),
},
output_mode="dict",
)
out = fs(data)
float_out = ops.divide(
ops.convert_to_tensor(data["float_1"]) - mean, ops.sqrt(variance)
)
float_out = ops.reshape(float_out, (10, -1))
self.assertEqual(tuple(out["int_1"].shape), (10, 32))
self.assertEqual(tuple(out["text_1"].shape), (10, 3))
self.assertAllClose(out["float_1"], float_out, atol=1e-3)
@pytest.mark.skipif(
backend.backend() in ("numpy", "torch"),
reason=(
"TODO: When using FeatureSpace as a Model in torch and numpy, "
"the error is large."
),
)
def test_saving(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "int_1"), crossing_dim=32),
cls.cross(("int_1", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
ref_out = fs(data)
temp_filepath = os.path.join(self.get_temp_dir(), "fs.keras")
fs.save(temp_filepath)
fs = saving_api.load_model(temp_filepath)
# Save again immediately after loading to test idempotency
temp_filepath = os.path.join(self.get_temp_dir(), "fs2.keras")
fs.save(temp_filepath)
# Test correctness of the first saved FS
out = fs(data)
self.assertAllClose(out, ref_out)
inputs = fs.get_inputs()
outputs = fs.get_encoded_features()
model = models.Model(inputs=inputs, outputs=outputs)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
out = model.predict(ds.batch(4))
self.assertAllClose(out[0], ref_out)
# Test correctness of the re-saved FS
fs = saving_api.load_model(temp_filepath)
out = fs(data)
self.assertAllClose(out, ref_out)
def test_errors(self):
# Test no features
with self.assertRaisesRegex(ValueError, "cannot be None or empty"):
feature_space.FeatureSpace(features={})
# Test no crossing dim
with self.assertRaisesRegex(ValueError, "`crossing_dim`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "f2")],
crossing_dim=None,
)
# Test wrong cross feature name
with self.assertRaisesRegex(ValueError, "should be present in "):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "unknown")],
crossing_dim=32,
)
# Test wrong output mode
with self.assertRaisesRegex(ValueError, "for argument `output_mode`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
output_mode="unknown",
)
# Test call before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs({"f1": [0], "f2": [0]})
# Test get_encoded_features before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs.get_encoded_features()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/pipeline.py | keras/src/layers/preprocessing/pipeline.py | from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessing pipeline,
in particular an image data augmentation pipeline.
Compared to a `Sequential` model, `Pipeline` features
a few important differences:
- It's not a `Model`, just a plain layer.
- When the layers in the pipeline are compatible
with `tf.data`, the pipeline will also
remain `tf.data` compatible. That is to say,
the pipeline will not attempt to convert
its inputs to backend-native tensors
when in a tf.data context (unlike a `Sequential`
model).
Example:
```python
from keras import layers
preprocessing_pipeline = layers.Pipeline([
layers.AutoContrast(),
layers.RandomZoom(0.2),
layers.RandomRotation(0.2),
])
# `ds` is a tf.data.Dataset
preprocessed_ds = ds.map(
preprocessing_pipeline,
num_parallel_calls=4,
)
```
"""
def __init__(self, layers, name=None):
super().__init__(name=name)
self._pipeline_layers = layers
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def layers(self):
return self._pipeline_layers
def call(self, inputs, training=True, mask=None):
for layer in self._pipeline_layers:
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
@classmethod
def from_config(cls, config):
config["layers"] = [
serialization_lib.deserialize_keras_object(x)
for x in config["layers"]
]
return cls(**config)
def get_config(self):
config = {
"layers": serialization_lib.serialize_keras_object(
self._pipeline_layers
),
"name": self.name,
}
return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/__init__.py | keras/src/layers/preprocessing/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/integer_lookup.py | keras/src/layers/preprocessing/integer_lookup.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.index_lookup import IndexLookup
from keras.src.utils import backend_utils
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.IntegerLookup")
class IntegerLookup(IndexLookup):
"""A preprocessing layer that maps integers to (possibly encoded) indices.
This layer maps a set of arbitrary integer input tokens into indexed integer
output via a table-based vocabulary lookup. The layer's output indices will
be contiguously arranged up to the maximum vocab size, even if the input
tokens are non-continguous or unbounded. The layer supports multiple options
for encoding the output via `output_mode`, and has optional support for
out-of-vocabulary (OOV) tokens and masking.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
determine the frequency of individual integer tokens, and create a
vocabulary from them. If the vocabulary is capped in size, the most frequent
tokens will be used to create the vocabulary and all others will be treated
as OOV.
There are two possible output modes for the layer. When `output_mode` is
`"int"`, input integers are converted to their index in the vocabulary (an
integer). When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`,
input integers are encoded into an array where each dimension corresponds to
an element in the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode`
is `"int"`, the vocabulary will begin with the mask token at index 0,
followed by OOV indices, followed by the rest of the vocabulary. When
`output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will
begin with OOV indices and instances of the mask token will be dropped.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting the vocabulary or when setting
`pad_to_max_tokens=True`. If None, there is no cap on the size of
the vocabulary. Note that this size includes the OOV
and mask tokens. Defaults to `None`.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are modulated to
determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling
the layer. Defaults to `1`.
mask_token: An integer token that represents masked inputs. When
`output_mode` is `"int"`, the token is included in vocabulary
and mapped to index 0. In other output modes,
the token will not appear in the vocabulary and instances
of the mask token in the input will be dropped.
If set to None, no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is `True`. The token to return
for OOV indices. Defaults to `-1`.
vocabulary: Optional. Either an array of integers or a string path to a
text file. If passing an array, can pass a tuple, list,
1D NumPy array, or 1D tensor containing the integer vocbulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
vocabulary_dtype: The dtype of the vocabulary terms.
Only `vocabulary_dtype='int64'` is supported at this time.
Defaults to `"int64"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D NumPy array, or 1D tensor or the same length
as the vocabulary, containing the floating point inverse document
frequency weights, which will be multiplied by per sample term
counts for the final TF-IDF weight.
If the `vocabulary` argument is set, and `output_mode` is
`"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the vocabulary indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary,
containing a 1 at the element index. If the last dimension
is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single
array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`,
output shape will be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is
applied to find the value in each token slot.
For `"int"` output, the output shape matches the input shape.
For `"one_hot"` output, the output shape is
`input_shape + (vocabulary_size,)`, where `input_shape` may
have arbitrary rank. For other output modes (`"multi_hot"`,
`"count"`, `"tf_idf"`), the output shape is `(batch_size,
vocabulary_size)`. Defaults to `"int"`.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab)
>>> layer(data)
array([[1, 3, 4],
[4, 0, 2]])
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by
analyzing the dataset.
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
[-1, 42, 1138, 1000, 36, 12]
Note that the OOV token -1 have been added to the vocabulary. The remaining
tokens are sorted by frequency (42, which has 2 occurrences, is first) then
by inverse sort order.
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup()
>>> layer.adapt(data)
>>> layer(data)
array([[5, 2, 1],
[1, 3, 4]])
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV
indices. When a layer is created with more than one OOV index, any OOV
tokens are hashed into the number of OOV buckets, distributing OOV tokens in
a deterministic fashion across the set.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [37, 1000, 36]])
>>> layer = IntegerLookup(vocabulary=vocab, num_oov_indices=2)
>>> layer(data)
array([[2, 4, 5],
[1, 0, 3]])
Note that the output for OOV token 37 is 1, while the output for OOV token
1000 is 0. The in-vocab terms have their output index increased by 1 from
earlier examples (12 maps to 2, etc) in order to make space for the extra
OOV token.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
`num_oov_indices` dimensions in the one_hot encoding represent OOV values.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([12, 36, 1138, 42, 7]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
`num_oov_indices` dimensions in the multi_hot encoding represent OOV tokens
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV tokens.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='count')
>>> layer(data)
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)
**TF-IDF output**
Configure the layer with `output_mode='tf_idf'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV tokens.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be
provided along with the vocabulary. Note that the `idf_weight` for OOV
tokens will default to the average of all idf weights passed in.
>>> vocab = [12, 36, 1138, 42]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(
... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
To specify the idf weights for oov tokens, you will need to pass the entire
vocabulary including the leading oov token.
>>> vocab = [-1, 12, 36, 1138, 42]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(
... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
When adapting the layer in `"tf_idf"` mode, each input sample will
be considered a document, and IDF weight per token will be
calculated as:
`log(1 + num_documents / (1 + token_document_count))`.
**Inverse lookup**
This example demonstrates how to map indices to tokens using this layer.
(You can also use `adapt()` with `inverse=True`, but for simplicity we'll
pass the vocab in this example.)
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[1, 3, 4], [4, 0, 2]])
>>> layer = IntegerLookup(vocabulary=vocab, invert=True)
>>> layer(data)
array([[ 12, 1138, 42],
[ 42, -1, 36]])
Note that the first index corresponds to the oov token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup(vocabulary=vocab)
>>> i_layer = IntegerLookup(
... vocabulary=layer.get_vocabulary(), invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
array([[ 12, 1138, 42],
[ 42, -1, 36]])
In this example, the input token 1000 resulted in an output of -1, since
1000 was not in the vocabulary - it got represented as an OOV, and all OOV
tokens are returned as -1 in the inverse layer. Also, note that for the
inverse to work, you must have already set the forward layer vocabulary
either directly or via `adapt()` before calling `get_vocabulary()`.
"""
def __init__(
self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token=-1,
vocabulary=None,
vocabulary_dtype="int64",
idf_weights=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer IntegerLookup requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if max_tokens is not None and max_tokens <= 1:
raise ValueError(
"If `max_tokens` is set for `IntegerLookup`, it must be "
f"greater than 1. Received: max_tokens={max_tokens}"
)
if num_oov_indices < 0:
raise ValueError(
"The value of `num_oov_indices` argument for `IntegerLookup` "
"must >= 0. Received: num_oov_indices="
f"{num_oov_indices}"
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse=True` can only be used with the TensorFlow backend."
)
if vocabulary_dtype != "int64":
raise ValueError(
"Only `vocabulary_dtype='int64'` is supported "
"at this time. Received: "
f"vocabulary_dtype={vocabulary_dtype}"
)
super().__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
vocabulary_dtype=vocabulary_dtype,
idf_weights=idf_weights,
invert=invert,
output_mode=output_mode,
sparse=sparse,
pad_to_max_tokens=pad_to_max_tokens,
name=name,
**kwargs,
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
def adapt(self, data, steps=None):
"""Computes a vocabulary of integer terms from tokens in a dataset.
Calling `adapt()` on an `IntegerLookup` layer is an alternative to
passing in a precomputed vocabulary on construction via the
`vocabulary` argument. An `IntegerLookup` layer should always be either
adapted over a dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all integer
tokens seen in the dataset, sorted by occurrence count, with ties broken
by sort order of the tokens (high to low). At the end of `adapt()`, if
`max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
`output_mode='tf-idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`, as a list of integers,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
super().adapt(data, steps=steps)
def get_config(self):
config = super().get_config()
if config["oov_token"] is not None:
config["oov_token"] = int(config["oov_token"])
if config["mask_token"] is not None:
config["mask_token"] = int(config["mask_token"])
if config["vocabulary"] is not None:
config["vocabulary"] = [int(v) for v in config["vocabulary"]]
return config
def call(self, inputs):
if not isinstance(
inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
outputs = super().call(inputs)
return backend_utils.convert_tf_tensor(outputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/discretization_test.py | keras/src/layers/preprocessing/discretization_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.saving import saving_api
from keras.src.testing.test_utils import named_product
class DiscretizationTest(testing.TestCase):
    """Tests for `layers.Discretization` (fixed bins and `adapt()` flows)."""

    def test_discretization_basics(self):
        # Standard layer-contract checks: shapes, weight counts, masking.
        self.run_layer_test(
            layers.Discretization,
            init_kwargs={
                "bin_boundaries": [0.0, 0.5, 1.0],
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
        )

    def test_adapt_flow(self):
        # Bin boundaries learned from data via adapt().
        layer = layers.Discretization(num_bins=4)
        layer.adapt(
            np.random.random((32, 3)),
        )
        output = layer(np.array([[0.0, 0.1, 0.3]]))
        # NOTE(review): `assertTrue(output.dtype, "int32")` always passes --
        # the second argument is only the failure message. An equality
        # assertion on the dtype was probably intended; confirm the
        # expected dtype before changing.
        self.assertTrue(output.dtype, "int32")

    @parameterized.named_parameters(
        named_product(
            [
                {
                    "testcase_name": "int",
                    "output_mode": "int",
                    "input_array": [[-1.0, 0.0, 0.1, 0.8, 1.2]],
                    "expected_output": [[0, 1, 1, 2, 3]],
                },
                {
                    "testcase_name": "one_hot_rank_1",
                    "output_mode": "one_hot",
                    "input_array": [0.1, 0.8],
                    "expected_output": [[0, 1, 0, 0], [0, 0, 1, 0]],
                },
                {
                    "testcase_name": "multi_hot_rank_2",
                    "output_mode": "multi_hot",
                    "input_array": [[0.1, 0.8]],
                    "expected_output": [[0, 1, 1, 0]],
                },
                {
                    "testcase_name": "one_hot_rank_3",
                    "output_mode": "one_hot",
                    "input_array": [[[0.15, 0.75], [0.85, 0.45]]],
                    "expected_output": [
                        [
                            [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],
                            [[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]],
                        ]
                    ],
                },
                {
                    "testcase_name": "multi_hot_rank_3",
                    "output_mode": "multi_hot",
                    "input_array": [[[0.15, 0.75], [0.85, 0.45]]],
                    "expected_output": [
                        [[0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0]]
                    ],
                },
                {
                    "testcase_name": "count",
                    "output_mode": "count",
                    "input_array": [[0.1, 0.8, 0.9]],
                    "expected_output": [[0, 1, 2, 0]],
                },
            ],
            # Sparse outputs are only exercised on backends that support them.
            sparse=(
                [True, False] if backend.SUPPORTS_SPARSE_TENSORS else [False]
            ),
        )
    )
    def test_correctness(
        self, output_mode, input_array, expected_output, sparse
    ):
        # The layer rejects sparse output for integer bin indices.
        if output_mode == "int" and sparse:
            pytest.skip("sparse=True cannot be combined with output_mode=int")
        input_array = np.array(input_array)
        expected_output = np.array(expected_output)
        layer = layers.Discretization(
            bin_boundaries=[0.0, 0.5, 1.0],
            output_mode=output_mode,
            sparse=sparse,
        )
        output = layer(input_array)
        self.assertSparse(output, sparse)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, expected_output)

    def test_tf_data_compatibility(self):
        # The layer must be mappable over a tf.data pipeline regardless of
        # the active backend.
        # With fixed bins
        layer = layers.Discretization(
            bin_boundaries=[0.0, 0.35, 0.5, 1.0], dtype="float32"
        )
        x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]])
        self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]]))
        ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer)
        for output in ds.take(1):
            output = output.numpy()
        self.assertAllClose(output, np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]]))
        # With adapt flow
        layer = layers.Discretization(num_bins=4)
        layer.adapt(
            np.random.random((32, 3)),
        )
        x = np.array([[0.0, 0.1, 0.3]])
        ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer)
        for output in ds.take(1):
            output.numpy()

    def test_serialization(self):
        # Round-trip get_config()/from_config() at each stage of the
        # layer's lifecycle.
        layer = layers.Discretization(num_bins=5)
        # Serialization before `adapt` is called.
        config = layer.get_config()
        revived_layer = layers.Discretization.from_config(config)
        self.assertEqual(config, revived_layer.get_config())
        # Serialization after `adapt` is called but `num_bins` was not reached.
        layer.adapt(np.array([0.0, 1.0, 5.0]))
        config = layer.get_config()
        revived_layer = layers.Discretization.from_config(config)
        self.assertEqual(config, revived_layer.get_config())
        # Serialization after `adapt` is called and `num_bins` is reached.
        layer.adapt(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]))
        config = layer.get_config()
        revived_layer = layers.Discretization.from_config(config)
        self.assertEqual(config, revived_layer.get_config())
        # Serialization with `bin_boundaries`.
        layer = layers.Discretization(bin_boundaries=[0.0, 0.35, 0.5, 1.0])
        config = layer.get_config()
        revived_layer = layers.Discretization.from_config(config)
        self.assertEqual(config, revived_layer.get_config())

    def test_saving(self):
        # Full model save/load round trip in the .keras format.
        # With fixed bins
        layer = layers.Discretization(bin_boundaries=[0.0, 0.35, 0.5, 1.0])
        model = models.Sequential(
            [
                layers.Input((2,)),
                layer,
            ]
        )
        fpath = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(fpath)
        model = saving_api.load_model(fpath)
        x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]])
        self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]]))
        # With adapt flow
        layer = layers.Discretization(num_bins=4)
        layer.adapt(
            np.random.random((32, 3)),
        )
        ref_input = np.random.random((1, 2))
        ref_output = layer(ref_input)
        model = models.Sequential(
            [
                layers.Input((2,)),
                layer,
            ]
        )
        fpath = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(fpath)
        model = saving_api.load_model(fpath)
        self.assertAllClose(layer(ref_input), ref_output)

    def test_init_num_bins_and_bin_boundaries_raises(self):
        # Exactly one of num_bins / bin_boundaries must be provided.
        with self.assertRaisesRegex(
            ValueError, "Both `num_bins` and `bin_boundaries`"
        ):
            layers.Discretization(num_bins=3, bin_boundaries=[0.0, 1.0])
        with self.assertRaisesRegex(
            ValueError, "either `num_bins` or `bin_boundaries`"
        ):
            layers.Discretization()

    def test_call_before_adapt_raises(self):
        # A num_bins layer has no boundaries until adapt() runs.
        layer = layers.Discretization(num_bins=3)
        with self.assertRaisesRegex(ValueError, "You need .* call .*adapt"):
            layer([[0.1, 0.8, 0.9]])

    def test_model_call_vs_predict_consistency(self):
        """Test that model(input) and model.predict(input) produce consistent outputs."""  # noqa: E501
        # Test with int output mode
        layer = layers.Discretization(
            bin_boundaries=[-0.5, 0, 0.1, 0.2, 3],
            output_mode="int",
        )
        x = np.array([[0.0, 0.15, 0.21, 0.3], [0.0, 0.17, 0.451, 7.8]])
        # Create model
        inputs = layers.Input(shape=(4,), dtype="float32")
        outputs = layer(inputs)
        model = models.Model(inputs=inputs, outputs=outputs)
        # Test both execution modes
        model_call_output = model(x)
        predict_output = model.predict(x)
        # Check consistency
        self.assertAllClose(model_call_output, predict_output)
        self.assertEqual(
            backend.standardize_dtype(model_call_output.dtype),
            backend.standardize_dtype(predict_output.dtype),
        )
        self.assertTrue(backend.is_int_dtype(model_call_output.dtype))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/normalization.py | keras/src/layers/preprocessing/normalization.py | import math
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.Normalization")
class Normalization(DataLayer):
    """A preprocessing layer that normalizes continuous features.

    This layer will shift and scale inputs into a distribution centered around
    0 with standard deviation 1. It accomplishes this by precomputing the mean
    and variance of the data, and calling `(input - mean) / sqrt(var)` at
    runtime.

    The mean and variance values for the layer must be either supplied on
    construction or learned via `adapt()`. `adapt()` will compute the mean and
    variance of the data and store them as the layer's weights. `adapt()` should
    be called before `fit()`, `evaluate()`, or `predict()`.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        axis: Integer, tuple of integers, or None. The axis or axes that should
            have a separate mean and variance for each index in the shape.
            For example, if shape is `(None, 5)` and `axis=1`, the layer will
            track 5 separate mean and variance values for the last axis.
            If `axis` is set to `None`, the layer will normalize
            all elements in the input by a scalar mean and variance.
            When `-1`, the last axis of the input is assumed to be a
            feature dimension and is normalized per index.
            Note that in the specific case of batched scalar inputs where
            the only axis is the batch axis, the default will normalize
            each index in the batch separately.
            In this case, consider passing `axis=None`. Defaults to `-1`.
        mean: The mean value(s) to use during normalization. The passed value(s)
            will be broadcast to the shape of the kept axes above;
            if the value(s) cannot be broadcast, an error will be raised when
            this layer's `build()` method is called.
            `mean` and `variance` must be specified together.
        variance: The variance value(s) to use during normalization. The passed
            value(s) will be broadcast to the shape of the kept axes above;
            if the value(s) cannot be broadcast, an error will be raised when
            this layer's `build()` method is called.
            `mean` and `variance` must be specified together.
        invert: If `True`, this layer will apply the inverse transformation
            to its inputs: it would turn a normalized input back into its
            original form.

    Examples:

    Calculate a global mean and variance by analyzing the dataset in `adapt()`.

    >>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32')
    >>> input_data = np.array([1., 2., 3.], dtype='float32')
    >>> layer = keras.layers.Normalization(axis=None)
    >>> layer.adapt(adapt_data)
    >>> layer(input_data)
    array([-1.4142135, -0.70710677, 0.], dtype=float32)

    Calculate a mean and variance for each index on the last axis.

    >>> adapt_data = np.array([[0., 7., 4.],
    ...                        [2., 9., 6.],
    ...                        [0., 7., 4.],
    ...                        [2., 9., 6.]], dtype='float32')
    >>> input_data = np.array([[0., 7., 4.]], dtype='float32')
    >>> layer = keras.layers.Normalization(axis=-1)
    >>> layer.adapt(adapt_data)
    >>> layer(input_data)
    array([-1., -1., -1.], dtype=float32)

    Pass the mean and variance directly.

    >>> input_data = np.array([[1.], [2.], [3.]], dtype='float32')
    >>> layer = keras.layers.Normalization(mean=3., variance=2.)
    >>> layer(input_data)
    array([[-1.4142135 ],
           [-0.70710677],
           [ 0.        ]], dtype=float32)

    Use the layer to de-normalize inputs (after adapting the layer).

    >>> adapt_data = np.array([[0., 7., 4.],
    ...                        [2., 9., 6.],
    ...                        [0., 7., 4.],
    ...                        [2., 9., 6.]], dtype='float32')
    >>> input_data = np.array([[1., 2., 3.]], dtype='float32')
    >>> layer = keras.layers.Normalization(axis=-1, invert=True)
    >>> layer.adapt(adapt_data)
    >>> layer(input_data)
    array([2., 10., 8.], dtype=float32)
    """

    def __init__(
        self, axis=-1, mean=None, variance=None, invert=False, **kwargs
    ):
        super().__init__(**kwargs)
        # Standardize `axis` to a tuple.
        if axis is None:
            axis = ()
        elif isinstance(axis, int):
            axis = (axis,)
        else:
            axis = tuple(axis)
        self.axis = axis
        # Constructor-supplied statistics; both are `None` when the stats
        # are to be learned via `adapt()` instead.
        self.input_mean = mean
        self.input_variance = variance
        self.invert = invert
        self.supports_masking = True
        self._build_input_shape = None
        # `self.mean` stays `None` until `build()`/`finalize_state()` runs.
        self.mean = None
        # Set `mean` and `variance` if passed.
        if (mean is not None) != (variance is not None):
            raise ValueError(
                "When setting values directly, both `mean` and `variance` "
                f"must be set. Received: mean={mean} and variance={variance}"
            )

    def build(self, input_shape):
        """Resolves axes and creates weights / constant stats tensors."""
        if input_shape is None:
            return
        ndim = len(input_shape)
        self._build_input_shape = input_shape
        if any(a < -ndim or a >= ndim for a in self.axis):
            raise ValueError(
                "All `axis` values must be in the range [-ndim, ndim). "
                f"Received inputs with ndim={ndim}, while axis={self.axis}"
            )
        # Axes to be kept, replacing negative values with positive equivalents.
        # Sorted to avoid transposing axes.
        self._keep_axis = tuple(
            sorted([d if d >= 0 else d + ndim for d in self.axis])
        )
        # All axes to be kept should have known shape.
        for d in self._keep_axis:
            if input_shape[d] is None:
                raise ValueError(
                    "All `axis` values to be kept must have a known shape. "
                    f"Received axis={self.axis}, "
                    f"inputs.shape={input_shape}, "
                    f"with unknown axis at index {d}"
                )
        # Axes to be reduced.
        self._reduce_axis = tuple(
            d for d in range(ndim) if d not in self._keep_axis
        )
        # 1 if an axis should be reduced, 0 otherwise.
        self._reduce_axis_mask = [
            0 if d in self._keep_axis else 1 for d in range(ndim)
        ]
        # Broadcast any reduced axes.
        self._broadcast_shape = [
            input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)
        ]
        mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)
        self._mean_and_var_shape = mean_and_var_shape
        if self.input_mean is None:
            # Stats will come from `adapt()`: store them in non-trainable
            # weights so they are saved/loaded with the model.
            self.adapt_mean = self.add_weight(
                name="mean",
                shape=mean_and_var_shape,
                initializer="zeros",
                trainable=False,
            )
            self.adapt_variance = self.add_weight(
                name="variance",
                shape=mean_and_var_shape,
                initializer="ones",
                trainable=False,
            )
            # For backwards compatibility with older saved models.
            self.count = self.add_weight(
                name="count",
                shape=(),
                dtype="int",
                initializer="zeros",
                trainable=False,
            )
            self.built = True
            self.finalize_state()
        else:
            # In the no adapt case, make constant tensors for mean and variance
            # with proper broadcast shape for use during call.
            mean = ops.convert_to_tensor(self.input_mean)
            variance = ops.convert_to_tensor(self.input_variance)
            mean = ops.broadcast_to(mean, self._broadcast_shape)
            variance = ops.broadcast_to(variance, self._broadcast_shape)
            self.mean = ops.cast(mean, dtype=self.compute_dtype)
            self.variance = ops.cast(variance, dtype=self.compute_dtype)

    def adapt(self, data):
        """Computes the mean and variance of values in a dataset.

        Calling `adapt()` on a `Normalization` layer is an alternative to
        passing in `mean` and `variance` arguments during layer construction. A
        `Normalization` layer should always either be adapted over a dataset or
        passed `mean` and `variance`.

        During `adapt()`, the layer will compute a `mean` and `variance`
        separately for each position in each axis specified by the `axis`
        argument. To calculate a single `mean` and `variance` over the input
        data, simply pass `axis=None` to the layer.

        Args:
            data: The data to train on. It can be passed either as a
                `tf.data.Dataset`, as a NumPy array, or as a backend-native
                eager tensor.
                If a dataset, *it must be batched*. Keras will assume that the
                data is batched, and if that assumption doesn't hold, the mean
                and variance may be incorrectly computed.
        """
        # First, determine the input shape (and normalize `data` where
        # needed) so the layer can be built before computing stats.
        if isinstance(data, np.ndarray) or backend.is_tensor(data):
            input_shape = data.shape
        elif isinstance(data, tf.data.Dataset):
            input_shape = tuple(data.element_spec.shape)
            if len(input_shape) == 1:
                # Batch dataset if it isn't batched
                data = data.batch(128)
                input_shape = tuple(data.element_spec.shape)
        elif isinstance(data, PyDataset):
            # NOTE(review): this rebinds `data` to the *first batch*, so the
            # statistics below are computed from that single batch rather
            # than the whole PyDataset (the streaming branch further down is
            # never reached for PyDataset inputs) -- confirm this is intended.
            data = data[0]
            if isinstance(data, tuple):
                # handling (x, y) or (x, y, sample_weight)
                data = data[0]
            input_shape = data.shape
        else:
            raise TypeError(
                f"Unsupported data type: {type(data)}. `adapt` supports "
                f"`np.ndarray`, backend tensors, `tf.data.Dataset`, and "
                f"`keras.utils.PyDataset`."
            )
        if not self.built:
            self.build(input_shape)
        else:
            # Re-adapting is allowed, but only with a compatible shape on
            # the kept (feature) axes.
            for d in self._keep_axis:
                if input_shape[d] != self._build_input_shape[d]:
                    raise ValueError(
                        "The layer was built with "
                        f"input_shape={self._build_input_shape}, "
                        "but adapt() is being called with data with "
                        f"an incompatible shape, data.shape={input_shape}"
                    )
        if isinstance(data, np.ndarray):
            total_mean = np.mean(data, axis=self._reduce_axis)
            total_var = np.var(data, axis=self._reduce_axis)
        elif backend.is_tensor(data):
            total_mean = ops.mean(data, axis=self._reduce_axis)
            total_var = ops.var(data, axis=self._reduce_axis)
        elif isinstance(data, (tf.data.Dataset, PyDataset)):
            # Streaming computation: fold each batch's mean/variance into
            # running totals, weighted by element counts.
            total_mean = ops.zeros(self._mean_and_var_shape)
            total_var = ops.zeros(self._mean_and_var_shape)
            total_count = 0
            for batch in data:
                batch = backend.convert_to_tensor(
                    batch, dtype=self.compute_dtype
                )
                batch_mean = ops.mean(batch, axis=self._reduce_axis)
                batch_var = ops.var(batch, axis=self._reduce_axis)
                if self._reduce_axis:
                    # Number of elements reduced per kept-axis position.
                    batch_reduce_shape = (
                        batch.shape[d] for d in self._reduce_axis
                    )
                    batch_count = math.prod(batch_reduce_shape)
                else:
                    batch_count = 1
                total_count += batch_count
                batch_weight = float(batch_count) / total_count
                existing_weight = 1.0 - batch_weight
                new_total_mean = (
                    total_mean * existing_weight + batch_mean * batch_weight
                )
                # The variance is computed using the lack-of-fit sum of squares
                # formula (see
                # https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).
                total_var = (
                    total_var + (total_mean - new_total_mean) ** 2
                ) * existing_weight + (
                    batch_var + (batch_mean - new_total_mean) ** 2
                ) * batch_weight
                total_mean = new_total_mean
        else:
            raise NotImplementedError(f"Unsupported data type: {type(data)}")
        self.adapt_mean.assign(total_mean)
        self.adapt_variance.assign(total_var)
        self.finalize_state()

    def finalize_state(self):
        # Only relevant for the adapt() flow; a no-op before build() or when
        # mean/variance were supplied at construction time.
        if self.input_mean is not None or not self.built:
            return
        # In the adapt case, we make constant tensors for mean and variance with
        # proper broadcast shape and dtype each time `finalize_state` is called.
        self.mean = ops.reshape(self.adapt_mean, self._broadcast_shape)
        self.mean = ops.cast(self.mean, self.compute_dtype)
        self.variance = ops.reshape(self.adapt_variance, self._broadcast_shape)
        self.variance = ops.cast(self.variance, self.compute_dtype)

    def call(self, inputs):
        # This layer can be called in tf.data
        # even with another backend after it has been adapted.
        # However it must use backend-native logic for adapt().
        if self.mean is None:
            # May happen when in tf.data when mean/var was passed explicitly
            raise ValueError(
                "You must call `.build(input_shape)` "
                "on the layer before using it."
            )
        inputs = self.backend.core.convert_to_tensor(
            inputs, dtype=self.compute_dtype
        )
        # Ensure the weights are in the correct backend. Without this, it is
        # possible to cause breakage when using this layer in tf.data.
        mean = self.convert_weight(self.mean)
        variance = self.convert_weight(self.variance)
        # The stddev is clamped to at least `epsilon()` in both directions
        # to avoid division by (or multiplication with) zero.
        if self.invert:
            return self.backend.numpy.add(
                mean,
                self.backend.numpy.multiply(
                    inputs,
                    self.backend.numpy.maximum(
                        self.backend.numpy.sqrt(variance), backend.epsilon()
                    ),
                ),
            )
        else:
            return self.backend.numpy.divide(
                self.backend.numpy.subtract(inputs, mean),
                self.backend.numpy.maximum(
                    self.backend.numpy.sqrt(variance), backend.epsilon()
                ),
            )

    def compute_output_shape(self, input_shape):
        # Normalization is elementwise; shape is unchanged.
        return input_shape

    def get_config(self):
        # Only constructor-supplied stats are serialized here; adapted stats
        # live in the layer weights (so `mean`/`variance` serialize as None
        # in the adapt() flow).
        config = super().get_config()
        config.update(
            {
                "axis": self.axis,
                "invert": self.invert,
                "mean": np.array(self.input_mean).tolist(),
                "variance": np.array(self.input_variance).tolist(),
            }
        )
        return config

    def load_own_variables(self, store):
        super().load_own_variables(store)
        # Ensure that we call finalize_state after variable loading.
        self.finalize_state()

    def get_build_config(self):
        if self._build_input_shape:
            return {"input_shape": self._build_input_shape}

    def build_from_config(self, config):
        if config:
            self.build(config["input_shape"])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/hashing.py | keras/src/layers/preprocessing/hashing.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.utils import backend_utils
from keras.src.utils import numerical_utils
from keras.src.utils import tf_utils
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.Hashing")
class Hashing(Layer):
"""A preprocessing layer which hashes and bins categorical features.
This layer transforms categorical inputs to hashed output. It element-wise
converts ints or strings to ints in a fixed range. The stable hash
function uses `tensorflow::ops::Fingerprint` to produce the same output
consistently across all platforms.
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
which provides a consistent hashed output across different platforms and is
stable across invocations, regardless of device and context, by mixing the
input bits thoroughly.
If you want to obfuscate the hashed output, you can also pass a random
`salt` argument in the constructor. In that case, the layer will use the
[SipHash64](https://github.com/google/highwayhash) hash function, with
the `salt` value serving as additional input to the hash function.
**Note:** This layer internally uses TensorFlow. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
**Example (FarmHash64)**
>>> layer = keras.layers.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[1],
[0],
[1],
[1],
[2]])>
**Example (FarmHash64) with a mask value**
>>> layer = keras.layers.Hashing(num_bins=3, mask_value='')
>>> inp = [['A'], ['B'], [''], ['C'], ['D']]
>>> layer(inp)
array([[1],
[1],
[0],
[2],
[2]])
**Example (SipHash64)**
>>> layer = keras.layers.Hashing(num_bins=3, salt=[133, 137])
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[1],
[2],
[1],
[0],
[2]])
**Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**
>>> layer = keras.layers.Hashing(num_bins=3, salt=133)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[0],
[0],
[2],
[1],
[0]])
Args:
num_bins: Number of hash bins. Note that this includes the `mask_value`
bin, so the effective number of bins is `(num_bins - 1)`
if `mask_value` is set.
mask_value: A value that represents masked inputs, which are mapped to
index 0. `None` means no mask term will be added and the
hashing will start at index 0. Defaults to `None`.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64,
with these values used as an additional input
(known as a "salt" in cryptography).
These should be non-zero. If `None`, uses the FarmHash64 hash
function. It also supports tuple/list of 2 unsigned
integer numbers, see reference paper for details.
Defaults to `None`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, or
`"count"` configuring the layer as follows:
- `"int"`: Return the integer bin indices directly.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as `num_bins`, containing a 1
at the input's bin index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a
single array the same size as `num_bins`,
containing a 1 for each bin index
present in the sample. Treats the last dimension
as the sample dimension, if input shape is
`(..., sample_length)`, output shape will be
`(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count of
the number of times the bin index appeared in the sample.
Defaults to `"int"`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
and `"count"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor` instead of
a dense `Tensor`. Defaults to `False`.
**kwargs: Keyword arguments to construct a layer.
Input shape:
A single string, a list of strings, or an `int32` or `int64` tensor
of shape `(batch_size, ...,)`.
Output shape:
An `int32` tensor of shape `(batch_size, ...)`.
Reference:
- [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
"""
def __init__(
self,
num_bins,
mask_value=None,
salt=None,
output_mode="int",
sparse=False,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer Hashing requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
# By default, output int32 when output_mode='int' and floats otherwise.
if "dtype" not in kwargs or kwargs["dtype"] is None:
kwargs["dtype"] = (
"int64" if output_mode == "int" else backend.floatx()
)
super().__init__(**kwargs)
if num_bins is None or num_bins <= 0:
raise ValueError(
"The `num_bins` for `Hashing` cannot be `None` or "
f"non-positive values. Received: num_bins={num_bins}."
)
if output_mode == "int" and (
self.dtype_policy.name not in ("int32", "int64")
):
raise ValueError(
'When `output_mode="int"`, `dtype` should be an integer '
f"type, 'int32' or 'in64'. Received: dtype={kwargs['dtype']}"
)
# 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT)
accepted_output_modes = ("int", "one_hot", "multi_hot", "count")
if output_mode not in accepted_output_modes:
raise ValueError(
"Invalid value for argument `output_mode`. "
f"Expected one of {accepted_output_modes}. "
f"Received: output_mode={output_mode}"
)
if sparse and output_mode == "int":
raise ValueError(
"`sparse` may only be true if `output_mode` is "
'`"one_hot"`, `"multi_hot"`, or `"count"`. '
f"Received: sparse={sparse} and "
f"output_mode={output_mode}"
)
self.num_bins = num_bins
self.mask_value = mask_value
self.strong_hash = True if salt is not None else False
self.output_mode = output_mode
self.sparse = sparse
self.salt = None
if salt is not None:
if isinstance(salt, (tuple, list)) and len(salt) == 2:
self.salt = list(salt)
elif isinstance(salt, int):
self.salt = [salt, salt]
else:
raise ValueError(
"The `salt` argument for `Hashing` can only be a tuple of "
"size 2 integers, or a single integer. "
f"Received: salt={salt}."
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
def call(self, inputs):
from keras.src.backend import tensorflow as tf_backend
inputs = tf_utils.ensure_tensor(inputs)
if self.output_mode == "one_hot" and inputs.shape[-1] == 1:
# One hot only upranks if the final dimension is not 1.
inputs = tf_backend.numpy.squeeze(inputs, axis=-1)
if isinstance(inputs, tf.SparseTensor):
indices = tf.SparseTensor(
indices=inputs.indices,
values=self._hash_values_to_bins(inputs.values),
dense_shape=inputs.dense_shape,
)
else:
indices = self._hash_values_to_bins(inputs)
outputs = numerical_utils.encode_categorical_inputs(
indices,
output_mode=self.output_mode,
depth=self.num_bins,
sparse=self.sparse,
dtype=self.dtype,
backend_module=tf_backend,
)
return backend_utils.convert_tf_tensor(outputs)
def _hash_values_to_bins(self, values):
"""Converts a non-sparse tensor of values to bin indices."""
hash_bins = self.num_bins
mask = None
# If mask_value is set, the zeroth bin is reserved for it.
if self.mask_value is not None and hash_bins > 1:
hash_bins -= 1
mask = tf.equal(values, self.mask_value)
# Convert all values to strings before hashing.
# Floats are first normalized to int64.
if values.dtype.is_floating:
values = tf.cast(values, dtype="int64")
if values.dtype != tf.string:
values = tf.as_string(values)
# Hash the strings.
if self.strong_hash:
values = tf.strings.to_hash_bucket_strong(
values, hash_bins, name="hash", key=self.salt
)
else:
values = tf.strings.to_hash_bucket_fast(
values, hash_bins, name="hash"
)
if mask is not None:
values = tf.add(values, tf.ones_like(values))
values = tf.where(mask, tf.zeros_like(values), values)
return values
def compute_output_spec(self, inputs):
if self.output_mode == "int":
return backend.KerasTensor(shape=inputs.shape, dtype=self.dtype)
if len(inputs.shape) >= 1:
base_shape = tuple(inputs.shape)[:-1]
else:
base_shape = ()
return backend.KerasTensor(
shape=base_shape + (self.num_bins,), dtype=self.dtype
)
def get_config(self):
config = super().get_config()
config.update(
{
"num_bins": self.num_bins,
"salt": self.salt,
"mask_value": self.mask_value,
"output_mode": self.output_mode,
"sparse": self.sparse,
}
)
return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/category_encoding.py | keras/src/layers/preprocessing/category_encoding.py | from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.utils import backend_utils
from keras.src.utils import numerical_utils
@keras_export("keras.layers.CategoryEncoding")
class CategoryEncoding(DataLayer):
    """A preprocessing layer which encodes integer features.

    This layer provides options for condensing data into a categorical encoding
    when the total number of tokens are known in advance. It accepts integer
    values as inputs, and it outputs a dense or sparse representation of those
    inputs. For integer inputs where the total number of tokens is not known,
    use `keras.layers.IntegerLookup` instead.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Examples:

    **One-hot encoding data**

    >>> layer = keras.layers.CategoryEncoding(
    ...     num_tokens=4, output_mode="one_hot")
    >>> layer([3, 2, 0, 1])
    array([[0., 0., 0., 1.],
            [0., 0., 1., 0.],
            [1., 0., 0., 0.],
            [0., 1., 0., 0.]]>

    **Multi-hot encoding data**

    >>> layer = keras.layers.CategoryEncoding(
    ...     num_tokens=4, output_mode="multi_hot")
    >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
    array([[1., 1., 0., 0.],
            [1., 0., 0., 0.],
            [0., 1., 1., 0.],
            [0., 1., 0., 1.]]>

    **Using weighted inputs in `"count"` mode**

    >>> layer = keras.layers.CategoryEncoding(
    ...     num_tokens=4, output_mode="count")
    >>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
    >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
    array([[0.1, 0.2, 0. , 0. ],
            [0.2, 0. , 0. , 0. ],
            [0. , 0.2, 0.3, 0. ],
            [0. , 0.2, 0. , 0.4]]>

    Args:
        num_tokens: The total number of tokens the layer should support. All
            inputs to the layer must integers in the range `0 <= value <
            num_tokens`, or an error will be thrown.
        output_mode: Specification for the output of the layer.
            Values can be `"one_hot"`, `"multi_hot"` or `"count"`,
            configuring the layer as follows:
                - `"one_hot"`: Encodes each individual element in the input
                    into an array of `num_tokens` size, containing a 1 at the
                    element index. If the last dimension is size 1, will encode
                    on that dimension. If the last dimension is not size 1,
                    will append a new dimension for the encoded output.
                - `"multi_hot"`: Encodes each sample in the input into a single
                    array of `num_tokens` size, containing a 1 for each
                    vocabulary term present in the sample. Treats the last
                    dimension as the sample dimension, if input shape is
                    `(..., sample_length)`, output shape will be
                    `(..., num_tokens)`.
                - `"count"`: Like `"multi_hot"`, but the int array contains a
                    count of the number of times the token at that index
                    appeared in the sample.
            For all output modes, currently only output up to rank 2 is
            supported.
            Defaults to `"multi_hot"`.
        sparse: Whether to return a sparse tensor; for backends that support
            sparse tensors.

    Call arguments:
        inputs: A 1D or 2D tensor of integer inputs.
        count_weights: A tensor in the same shape as `inputs` indicating the
            weight for each sample value when summing up in `count` mode.
            Not used in `"multi_hot"` or `"one_hot"` modes.
    """

    def __init__(
        self, num_tokens=None, output_mode="multi_hot", sparse=False, **kwargs
    ):
        super().__init__(**kwargs)

        # Support deprecated names for output_modes.
        if output_mode == "binary":
            output_mode = "multi_hot"
        # 'output_mode' must be one of ("count", "one_hot", "multi_hot")
        if output_mode not in ("count", "one_hot", "multi_hot"):
            raise ValueError(f"Unknown arg for output_mode: {output_mode}")

        if num_tokens is None:
            raise ValueError(
                "num_tokens must be set to use this layer. If the "
                "number of tokens is not known beforehand, use the "
                "IntegerLookup layer instead."
            )
        if num_tokens < 1:
            raise ValueError(
                f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}."
            )
        self.num_tokens = num_tokens
        self.output_mode = output_mode
        self.sparse = sparse
        self._allow_non_tensor_positional_args = True
        self._convert_input_args = False

    def _encode(self, inputs, count_weights=None):
        # Delegate the encoding to the shared utility so behavior matches
        # the other categorical preprocessing layers.
        inputs = self.backend.core.convert_to_tensor(inputs)
        return numerical_utils.encode_categorical_inputs(
            inputs,
            output_mode=self.output_mode,
            depth=self.num_tokens,
            dtype=self.dtype,
            sparse=self.sparse,
            count_weights=count_weights,
            backend_module=self.backend,
        )

    def compute_output_shape(self, input_shape):
        # Rank-0 (scalar) inputs encode into a 1D vector of `num_tokens`.
        # Use `and` (not bitwise `&`) so `len()` is never evaluated when
        # `input_shape` is None.
        if input_shape is not None and len(input_shape) == 0:
            return (self.num_tokens,)
        if self.output_mode == "one_hot":
            # One-hot appends a new axis, unless the last dimension has
            # size 1 and the input is at least 2D, in which case it encodes
            # on that dimension.
            if input_shape[-1] != 1 or len(input_shape) == 1:
                return tuple(input_shape) + (self.num_tokens,)
            return tuple(input_shape[:-1]) + (self.num_tokens,)
        return tuple(input_shape[:-1]) + (self.num_tokens,)

    def compute_output_spec(self, inputs, count_weights=None):
        output_shape = self.compute_output_shape(inputs.shape)
        return KerasTensor(
            output_shape, dtype=self.compute_dtype, sparse=self.sparse
        )

    def get_config(self):
        config = {
            "num_tokens": self.num_tokens,
            "output_mode": self.output_mode,
            # Include `sparse` so the setting survives a config round-trip.
            "sparse": self.sparse,
        }
        base_config = super().get_config()
        return {**base_config, **config}

    def call(self, inputs, count_weights=None):
        if count_weights is not None:
            if self.output_mode != "count":
                raise ValueError(
                    "`count_weights` is not used when `output_mode` is not "
                    f"`'count'`. Received `count_weights={count_weights}`."
                )
            count_weights = self.backend.convert_to_tensor(
                count_weights, dtype=self.compute_dtype
            )
        outputs = self._encode(inputs, count_weights)
        return backend_utils.convert_tf_tensor(outputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/pipeline_test.py | keras/src/layers/preprocessing/pipeline_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
    """Tests for `keras.layers.Pipeline`."""

    def test_basics(self):
        """Generic layer checks for a two-stage image pipeline."""
        # Direct comparison instead of `False if ... else True`.
        run_training_check = backend.backend() != "numpy"
        self.run_layer_test(
            layers.Pipeline,
            init_kwargs={
                "layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
            run_mixed_precision_check=False,
            run_training_check=run_training_check,
        )

    @pytest.mark.skipif(
        backend.backend() == "numpy", reason="masking not working in numpy"
    )
    def test_correctness(self):
        """`training` and `mask` must be forwarded to every stage."""
        pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
        x = np.array([0])
        mask = np.array([0])
        pipeline(x, training=True, mask=mask)
        self.assertTrue(pipeline.layers[0].training)
        self.assertTrue(pipeline.layers[0].received_mask)
        self.assertTrue(pipeline.layers[1].training)
        self.assertTrue(pipeline.layers[1].received_mask)

    def test_tf_data_compatibility(self):
        """The pipeline must be mappable inside a `tf.data` pipeline."""
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        layer = layers.Pipeline(
            [
                layers.AutoContrast(),
                layers.CenterCrop(8, 9),
            ]
        )
        input_data = np.random.random(input_shape)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output = output.numpy()
        self.assertEqual(tuple(output.shape), output_shape)

    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Fails on CI, passes locally. TODO: debug",
    )
    def test_from_config(self):
        """Config round-trip must preserve the pipeline's behavior."""
        pipeline = layers.Pipeline(
            [
                layers.AutoContrast(),
                layers.CenterCrop(8, 9),
            ]
        )
        x = np.ones((2, 10, 12, 3))
        output = pipeline(x)
        restored = layers.Pipeline.from_config(pipeline.get_config())
        restored_output = restored(x)
        self.assertEqual(tuple(output.shape), (2, 8, 9, 3))
        self.assertAllClose(output, restored_output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/normalization_test.py | keras/src/layers/preprocessing/normalization_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
class NormalizationTest(testing.TestCase):
    """Tests for `keras.layers.Normalization` (build, adapt, serialization)."""
    @pytest.mark.requires_trainable_backend
    def test_normalization_basics(self):
        """Generic layer checks: adapted, fixed mean/variance, and inverted."""
        self.run_layer_test(
            layers.Normalization,
            init_kwargs={
                "axis": -1,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=3,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.Normalization,
            init_kwargs={
                "axis": -1,
                "mean": np.array([0.5, 0.2, -0.1]),
                "variance": np.array([0.1, 0.2, 0.3]),
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.Normalization,
            init_kwargs={
                "axis": -1,
                "mean": np.array([0.5, 0.2, -0.1]),
                "variance": np.array([0.1, 0.2, 0.3]),
                "invert": True,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
    @parameterized.parameters([("np",), ("tensor",), ("tf.data")])
    def test_normalization_adapt(self, input_type):
        """After `adapt()`, outputs should have ~zero mean and unit variance."""
        x = np.random.random((32, 4))
        if input_type == "np":
            data = x
        elif input_type == "tensor":
            data = backend.convert_to_tensor(x)
        elif input_type == "tf.data":
            data = tf_data.Dataset.from_tensor_slices(x).batch(8)
        else:
            raise NotImplementedError(input_type)
        layer = layers.Normalization()
        layer.adapt(data)
        self.assertTrue(layer.built)
        output = layer(x)
        output = backend.convert_to_numpy(output)
        self.assertAllClose(np.var(output, axis=0), 1.0, atol=1e-5)
        self.assertAllClose(np.mean(output, axis=0), 0.0, atol=1e-5)
        # Test in high-dim and with tuple axis.
        x = np.random.random((32, 4, 3, 5))
        if input_type == "np":
            data = x
        elif input_type == "tensor":
            data = backend.convert_to_tensor(x)
        elif input_type == "tf.data":
            data = tf_data.Dataset.from_tensor_slices(x).batch(8)
        layer = layers.Normalization(axis=(1, 2))
        layer.adapt(data)
        self.assertTrue(layer.built)
        output = layer(x)
        output = backend.convert_to_numpy(output)
        self.assertAllClose(np.var(output, axis=(0, 3)), 1.0, atol=1e-5)
        self.assertAllClose(np.mean(output, axis=(0, 3)), 0.0, atol=1e-5)
    @pytest.mark.skipif(
        backend.backend() != "torch",
        reason="Test symbolic call for torch meta device.",
    )
    def test_call_on_meta_device_after_built(self):
        """Calling a built layer on torch's meta device must not crash."""
        layer = layers.Normalization()
        data = np.random.random((32, 4))
        layer.adapt(data)
        with backend.device("meta"):
            layer(data)
    def test_normalization_with_mean_only_raises_error(self):
        # Test error when only `mean` is provided
        with self.assertRaisesRegex(
            ValueError, "both `mean` and `variance` must be set"
        ):
            layers.Normalization(mean=0.5)
    def test_normalization_with_variance_only_raises_error(self):
        # Test error when only `variance` is provided
        with self.assertRaisesRegex(
            ValueError, "both `mean` and `variance` must be set"
        ):
            layers.Normalization(variance=0.1)
    def test_normalization_axis_too_high(self):
        """`axis` beyond the input rank must be rejected at build time."""
        with self.assertRaisesRegex(
            ValueError, "All `axis` values must be in the range"
        ):
            layer = layers.Normalization(axis=3)
            layer.build((2, 2))
    def test_normalization_axis_too_low(self):
        """Negative `axis` below -rank must be rejected at build time."""
        with self.assertRaisesRegex(
            ValueError, "All `axis` values must be in the range"
        ):
            layer = layers.Normalization(axis=-4)
            layer.build((2, 3, 4))
    def test_normalization_unknown_axis_shape(self):
        """Kept axes must have a static dimension at build time."""
        with self.assertRaisesRegex(ValueError, "All `axis` values to be kept"):
            layer = layers.Normalization(axis=1)
            layer.build((None, None))
    def test_normalization_adapt_with_incompatible_shape(self):
        """Adapting on data that contradicts the built shape must fail."""
        layer = layers.Normalization(axis=-1)
        initial_shape = (10, 5)
        layer.build(initial_shape)
        new_shape_data = np.random.random((10, 3))
        with self.assertRaisesRegex(ValueError, "an incompatible shape"):
            layer.adapt(new_shape_data)
    def test_tf_data_compatibility(self):
        """The layer must be mappable inside a `tf.data` pipeline."""
        x = np.random.random((32, 3))
        ds = tf_data.Dataset.from_tensor_slices(x).batch(1)
        # With built-in values
        layer = layers.Normalization(
            mean=[0.1, 0.2, 0.3], variance=[0.1, 0.2, 0.3], axis=-1
        )
        layer.build((None, 3))
        for output in ds.map(layer).take(1):
            output.numpy()
        # With adapt flow
        layer = layers.Normalization(axis=-1)
        layer.adapt(
            np.random.random((32, 3)),
        )
        for output in ds.map(layer).take(1):
            output.numpy()
    def test_normalization_with_scalar_mean_var(self):
        """Scalar `mean`/`variance` should broadcast over the input."""
        input_data = np.array([[1, 2, 3]], dtype="float32")
        layer = layers.Normalization(mean=3.0, variance=2.0)
        layer(input_data)
    @parameterized.parameters([("x",), ("x_and_y",), ("x_y_and_weights",)])
    def test_adapt_pydataset_compat(self, pydataset_type):
        """`adapt()` must accept PyDatasets yielding x, (x, y), or (x, y, w)."""
        import keras
        class CustomDataset(PyDataset):
            def __len__(self):
                return 100
            def __getitem__(self, idx):
                x = np.random.rand(32, 32, 3)
                y = np.random.randint(0, 10, size=(1,))
                weights = np.random.randint(0, 10, size=(1,))
                if pydataset_type == "x":
                    return x
                elif pydataset_type == "x_and_y":
                    return x, y
                elif pydataset_type == "x_y_and_weights":
                    return x, y, weights
                else:
                    raise NotImplementedError(pydataset_type)
        normalizer = keras.layers.Normalization()
        normalizer.adapt(CustomDataset())
        self.assertTrue(normalizer.built)
        self.assertIsNotNone(normalizer.mean)
        self.assertIsNotNone(normalizer.variance)
        self.assertEqual(normalizer.mean.shape[-1], 3)
        self.assertEqual(normalizer.variance.shape[-1], 3)
        sample_input = np.random.rand(1, 32, 32, 3)
        output = normalizer(sample_input)
        self.assertEqual(output.shape, (1, 32, 32, 3))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/stft_spectrogram_test.py | keras/src/layers/preprocessing/stft_spectrogram_test.py | import numpy as np
import pytest
import scipy.signal
import tensorflow as tf
from keras import Input
from keras import Sequential
from keras.src import backend
from keras.src import layers
from keras.src import testing
class TestSpectrogram(testing.TestCase):
    """Tests for `keras.layers.STFTSpectrogram`, checked against scipy."""
    # All fixtures and layers run in float32 to keep tolerances meaningful.
    DTYPE = "float32"
    @staticmethod
    def _calc_spectrograms(
        x, mode, scaling, window, periodic, frame_length, frame_step, fft_length
    ):
        """Compute the layer's spectrogram and scipy's reference spectrogram.

        Returns a `(y_true, y)` pair where `y_true` comes from
        `scipy.signal.spectrogram` and `y` from an `STFTSpectrogram` layer
        built with the same parameters.
        """
        data_format = backend.image_data_format()
        input_shape = (None, 1) if data_format == "channels_last" else (1, None)
        layer = Sequential(
            [
                Input(shape=input_shape, dtype=TestSpectrogram.DTYPE),
                layers.STFTSpectrogram(
                    mode=mode,
                    frame_length=frame_length,
                    frame_step=frame_step,
                    fft_length=fft_length,
                    window=window,
                    scaling=scaling,
                    periodic=periodic,
                    dtype=TestSpectrogram.DTYPE,
                ),
            ]
        )
        # channels_first inputs are transposed in and back out so both data
        # formats produce channels_last results for comparison.
        if data_format == "channels_first":
            y = layer.predict(np.transpose(x, [0, 2, 1]), verbose=0)
            y = np.transpose(y, [0, 2, 1])
        else:
            y = layer.predict(x, verbose=0)
        window_arr = scipy.signal.get_window(window, frame_length, periodic)
        _, _, spec = scipy.signal.spectrogram(
            x[..., 0].astype(TestSpectrogram.DTYPE),
            window=window_arr.astype(TestSpectrogram.DTYPE),
            nperseg=frame_length,
            noverlap=frame_length - frame_step,
            mode=mode,
            scaling=scaling,
            detrend=False,
            nfft=fft_length,
        )
        # scipy returns (batch, freq, time); transpose to (batch, time, freq).
        y_true = np.transpose(spec, [0, 2, 1])
        return y_true, y
    @pytest.mark.requires_trainable_backend
    def test_spectrogram_channels_broadcasting(self):
        """Multi-channel output must equal per-channel outputs stacked."""
        rnd = np.random.RandomState(41)
        audio = rnd.uniform(-1, 1, size=(3, 16000, 7))
        layer_last = Sequential(
            [
                Input(shape=(None, 7), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd", dtype=self.DTYPE, data_format="channels_last"
                ),
            ]
        )
        layer_single = Sequential(
            [
                Input(shape=(None, 1), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd", dtype=self.DTYPE, data_format="channels_last"
                ),
            ]
        )
        layer_expand = Sequential(
            [
                Input(shape=(None, 7), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd",
                    dtype=self.DTYPE,
                    data_format="channels_last",
                    expand_dims=True,
                ),
            ]
        )
        y_last = layer_last.predict(audio, verbose=0)
        y_expanded = layer_expand.predict(audio, verbose=0)
        y_singles = [
            layer_single.predict(audio[..., i : i + 1], verbose=0)
            for i in range(audio.shape[-1])
        ]
        # Without expand_dims channels are concatenated on the last axis;
        # with expand_dims they are stacked into a new trailing axis.
        self.assertAllClose(y_last, np.concatenate(y_singles, axis=-1))
        self.assertAllClose(y_expanded, np.stack(y_singles, axis=-1))
    @pytest.mark.skipif(
        backend.backend() == "tensorflow",
        reason="TF doesn't support channels_first",
    )
    @pytest.mark.requires_trainable_backend
    def test_spectrogram_channels_first(self):
        """channels_first output must agree with channels_last output."""
        rnd = np.random.RandomState(41)
        audio = rnd.uniform(-1, 1, size=(3, 16000, 7))
        layer_first = Sequential(
            [
                Input(shape=(7, None), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd", dtype=self.DTYPE, data_format="channels_first"
                ),
            ]
        )
        layer_last = Sequential(
            [
                Input(shape=(None, 7), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd", dtype=self.DTYPE, data_format="channels_last"
                ),
            ]
        )
        layer_single = Sequential(
            [
                Input(shape=(None, 1), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd", dtype=self.DTYPE, data_format="channels_last"
                ),
            ]
        )
        layer_expand = Sequential(
            [
                Input(shape=(7, None), dtype=self.DTYPE),
                layers.STFTSpectrogram(
                    mode="psd",
                    dtype=self.DTYPE,
                    data_format="channels_first",
                    expand_dims=True,
                ),
            ]
        )
        y_singles = [
            layer_single.predict(audio[..., i : i + 1], verbose=0)
            for i in range(audio.shape[-1])
        ]
        y_expanded = layer_expand.predict(
            np.transpose(audio, [0, 2, 1]), verbose=0
        )
        y_last = layer_last.predict(audio, verbose=0)
        y_first = layer_first.predict(np.transpose(audio, [0, 2, 1]), verbose=0)
        self.assertAllClose(np.transpose(y_first, [0, 2, 1]), y_last)
        self.assertAllClose(y_expanded, np.stack(y_singles, axis=1))
        self.assertAllClose(
            y_first,
            np.transpose(np.concatenate(y_singles, axis=-1), [0, 2, 1]),
        )
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 150,
                "frame_step": 10,
                "fft_length": 512,
                "trainable": False,
                "padding": "same",
                "expand_dims": True,
                "data_format": "channels_first",
            },
            input_shape=(2, 3, 160000),
            expected_output_shape=(2, 3, 160000 // 10, 257),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @pytest.mark.requires_trainable_backend
    def test_spectrogram_basics(self):
        """Generic layer checks across modes, paddings and trainability."""
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 500,
                "frame_step": 25,
                "fft_length": 1024,
                "mode": "stft",
                "data_format": "channels_last",
            },
            input_shape=(2, 16000, 1),
            expected_output_shape=(2, 15500 // 25 + 1, 513 * 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 150,
                "frame_step": 71,
                "fft_length": 4096,
                "mode": "real",
                "data_format": "channels_last",
            },
            input_shape=(2, 160000, 1),
            expected_output_shape=(2, 159850 // 71 + 1, 2049),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 150,
                "frame_step": 43,
                "fft_length": 512,
                "mode": "imag",
                "padding": "same",
                "data_format": "channels_last",
            },
            input_shape=(2, 160000, 1),
            expected_output_shape=(2, 160000 // 43 + 1, 257),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 150,
                "frame_step": 10,
                "fft_length": 512,
                "trainable": False,
                "padding": "same",
                "data_format": "channels_last",
            },
            input_shape=(2, 160000, 3),
            expected_output_shape=(2, 160000 // 10, 257 * 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        self.run_layer_test(
            layers.STFTSpectrogram,
            init_kwargs={
                "frame_length": 150,
                "frame_step": 10,
                "fft_length": 512,
                "trainable": False,
                "padding": "same",
                "expand_dims": True,
                "data_format": "channels_last",
            },
            input_shape=(2, 160000, 3),
            expected_output_shape=(2, 160000 // 10, 257, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Backend does not support dynamic shapes",
    )
    def test_spectrogram_dynamic_shape(self):
        """Prediction must work with variable-length time dimensions."""
        model = Sequential(
            [
                Input(shape=(None, 1), dtype=TestSpectrogram.DTYPE),
                layers.STFTSpectrogram(
                    frame_length=500,
                    frame_step=25,
                    fft_length=1024,
                    mode="stft",
                    data_format="channels_last",
                ),
            ]
        )
        def generator():
            # Two batches of different sequence lengths.
            yield (np.random.random((2, 16000, 1)),)
            yield (np.random.random((3, 8000, 1)),)
        model.predict(generator())
    @pytest.mark.requires_trainable_backend
    def test_spectrogram_error(self):
        """Numeric agreement with scipy for several parameter combinations."""
        rnd = np.random.RandomState(41)
        x = rnd.uniform(low=-1, high=1, size=(4, 160000, 1)).astype(self.DTYPE)
        names = [
            "scaling",
            "window",
            "periodic",
            "frame_length",
            "frame_step",
            "fft_length",
        ]
        for args in [
            ("density", "hann", False, 512, 256, 1024),
            ("spectrum", "blackman", True, 512, 32, 1024),
            ("spectrum", "hamming", True, 256, 192, 512),
            ("spectrum", "tukey", False, 512, 128, 512),
            ("density", "hamming", True, 256, 256, 256),
            ("density", "hann", True, 256, 128, 256),
        ]:
            init_args = dict(zip(names, args))
            # TPUs compute in lower precision, so tolerances are relaxed.
            if testing.uses_tpu():
                tol_kwargs = {"atol": 5e-2, "rtol": 1e-3}
            else:
                tol_kwargs = {"atol": 5e-4, "rtol": 1e-6}
            init_args["mode"] = "magnitude"
            y_true, y = self._calc_spectrograms(x, **init_args)
            self.assertEqual(np.shape(y_true), np.shape(y))
            self.assertAllClose(y_true, y, **tol_kwargs)
            init_args["mode"] = "psd"
            y_true, y = self._calc_spectrograms(x, **init_args)
            self.assertEqual(np.shape(y_true), np.shape(y))
            self.assertAllClose(y_true, y, **tol_kwargs)
            init_args["mode"] = "angle"
            y_true, y = self._calc_spectrograms(x, **init_args)
            # Phase is compared modulo 2*pi (and via sin/cos) because the
            # wrapped angle may legitimately differ by a full turn.
            mask = np.isclose(y, y_true, **tol_kwargs)
            mask |= np.isclose(y + 2 * np.pi, y_true, **tol_kwargs)
            mask |= np.isclose(y - 2 * np.pi, y_true, **tol_kwargs)
            mask |= np.isclose(np.cos(y), np.cos(y_true), **tol_kwargs)
            mask |= np.isclose(np.sin(y), np.sin(y_true), **tol_kwargs)
            self.assertLess(np.mean(~mask), 2e-4)
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Requires TF tensors for TF-data module.",
    )
    def test_tf_data_compatibility(self):
        """The layer must be mappable inside a `tf.data` pipeline."""
        input_shape = (2, 16000, 1)
        output_shape = (2, 16000 // 128, 358)
        layer = layers.STFTSpectrogram(
            frame_length=256,
            frame_step=128,
            fft_length=715,
            padding="same",
            scaling=None,
        )
        input_data = np.random.random(input_shape)
        ds = tf.data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output = output.numpy()
        self.assertEqual(tuple(output.shape), output_shape)
    def test_exceptions(self):
        """Invalid constructor arguments and input dtypes must raise."""
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(
                frame_length=256, frame_step=1024, fft_length=512
            )
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(
                frame_length=256, frame_step=0, fft_length=512
            )
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(
                frame_length=256, frame_step=32, fft_length=128
            )
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(padding="mypadding")
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(scaling="l2")
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(mode="spectrogram")
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(window="unknowable")
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(scaling="l2")
        with self.assertRaises(ValueError):
            layers.STFTSpectrogram(padding="divide")
        with self.assertRaises(TypeError):
            layers.STFTSpectrogram()(
                np.random.randint(0, 255, size=(2, 16000, 1))
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/hashing_test.py | keras/src/layers/preprocessing/hashing_test.py | import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.saving import load_model
class ArrayLike:
    """Minimal array-like wrapper used to test NumPy-protocol inputs."""

    def __init__(self, values):
        self.values = values

    def __array__(self, dtype=None, copy=None):
        # Accept the `dtype`/`copy` arguments that NumPy 2.x may pass to
        # `__array__`; older NumPy calls it with no arguments, which still
        # works through the defaults. `np.array` always copies the wrapped
        # list, so the `copy` request is satisfied.
        return np.array(self.values, dtype=dtype)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="Broken with NumPy backend."
)
class HashingTest(testing.TestCase):
def test_config(self):
layer = layers.Hashing(
num_bins=8,
output_mode="int",
)
self.run_class_serialization_test(layer)
def test_correctness(self):
layer = layers.Hashing(num_bins=3)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))
layer = layers.Hashing(num_bins=3, mask_value="")
inp = [["A"], ["B"], [""], ["C"], ["D"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [1], [0], [2], [2]]))
layer = layers.Hashing(num_bins=3, salt=[133, 137])
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [2], [1], [0], [2]]))
layer = layers.Hashing(num_bins=3, salt=133)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[0], [0], [2], [1], [0]]))
def test_tf_data_compatibility(self):
layer = layers.Hashing(num_bins=3)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
ds = tf.data.Dataset.from_tensor_slices(inp).batch(5).map(layer)
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))
@parameterized.named_parameters(
("list", list),
("tuple", tuple),
("numpy", np.array),
("array_like", ArrayLike),
)
def test_tensor_like_inputs(self, data_fn):
input_data = data_fn([0, 1, 2, 3, 4])
expected_output = [1, 0, 1, 0, 2]
layer = layers.Hashing(num_bins=3)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
def test_hash_single_bin(self):
layer = layers.Hashing(num_bins=1)
inp = np.asarray([["A"], ["B"], ["C"], ["D"], ["E"]])
output = layer(inp)
self.assertAllClose([[0], [0], [0], [0], [0]], output)
def test_hash_dense_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
def test_hash_dense_input_mask_value_farmhash(self):
empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
# Outputs should be one more than test_hash_dense_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
# 'omar' should map to 0.
self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
def test_hash_dense_list_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
inp = [["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
inp = ["omar", "stringer", "marlo", "wire", "skywalker"]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([0, 0, 1, 0, 0], output)
def test_hash_dense_int_input_farmhash(self):
layer = layers.Hashing(num_bins=3)
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [0], [1], [0], [2]], output)
def test_hash_dense_input_siphash(self):
layer = layers.Hashing(num_bins=2, salt=[133, 137])
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
# Note the result is different from FarmHash.
self.assertAllClose([[0], [1], [0], [1], [0]], output)
layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
output_2 = layer_2(inp)
# Note the result is different from (133, 137).
self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
def test_hash_dense_int_input_siphash(self):
layer = layers.Hashing(num_bins=3, salt=[133, 137])
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [1], [2], [0], [1]], output)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([0, 0, 1, 0, 0], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_mask_value_farmhash(self):
empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
self.assertAllClose(indices, omar_mask_output.indices)
self.assertAllClose(indices, empty_mask_output.indices)
# Outputs should be one more than test_hash_sparse_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
# 'omar' should map to 0.
self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_int_input_farmhash(self):
layer = layers.Hashing(num_bins=3)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 0, 1, 0, 2], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_siphash(self):
layer = layers.Hashing(num_bins=2, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(output.indices, indices)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([0, 1, 0, 1, 0], output.values)
layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
output = layer_2(inp)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([1, 0, 1, 0, 1], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_int_input_siphash(self):
layer = layers.Hashing(num_bins=3, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_invalid_inputs(self):
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = layers.Hashing(num_bins=None)
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = layers.Hashing(num_bins=-1)
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=2, salt="string")
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=2, salt=[1])
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=1, salt=[133, 137, 177])
def test_one_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
expected_output_shape = [None, 3]
inputs = layers.Input(shape=(1,), dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="one_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape)
model = models.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllClose(expected_output, output_data)
def test_multi_hot_output(self):
input_array = np.array([[0, 1, 2, 3, 4]])
expected_output = [[1.0, 1.0, 1.0]]
expected_output_shape = [None, 3]
inputs = layers.Input(shape=(None,), dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="multi_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape)
model = models.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllClose(expected_output, output_data)
@parameterized.named_parameters(
(
"1d_input",
[0, 1, 2, 3, 4],
[2.0, 2.0, 1.0],
[3],
),
(
"2d_input",
[[0, 1, 2, 3, 4]],
[[2.0, 2.0, 1.0]],
[None, 3],
),
)
def test_count_output(self, input_value, expected_output, output_shape):
input_array = np.array(input_value)
if input_array.ndim == 1:
symbolic_sample_shape = ()
elif input_array.ndim == 2:
symbolic_sample_shape = (None,)
else:
raise TypeError("Unknown `symbolic_sample_shape`")
inputs = layers.Input(shape=symbolic_sample_shape, dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="count")
outputs = layer(inputs)
self.assertAllEqual(output_shape, outputs.shape)
output_data = layer(input_array)
self.assertAllEqual(expected_output, output_data)
@parameterized.named_parameters(
("int32", "int32"),
("int64", "int64"),
)
def test_int_output_dtype(self, dtype):
input_data = layers.Input(batch_size=16, shape=(4,), dtype="string")
layer = layers.Hashing(num_bins=3, output_mode="int", dtype=dtype)
output = layer(input_data)
self.assertEqual(output.dtype, dtype)
@parameterized.named_parameters(
("float32", "float32"),
("float64", "float64"),
)
def test_one_hot_output_dtype(self, dtype):
input_data = layers.Input(batch_size=16, shape=(1,), dtype="string")
layer = layers.Hashing(num_bins=3, output_mode="one_hot", dtype=dtype)
output = layer(input_data)
self.assertEqual(output.dtype, dtype)
def test_config_with_custom_name(self):
layer = layers.Hashing(num_bins=2, name="hashing")
config = layer.get_config()
layer_1 = layers.Hashing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses string dtype."
)
def test_saving(self):
input_data = np.array(
["omar", "stringer", "marlo", "wire", "skywalker"]
)
inputs = layers.Input(shape=(), dtype="string")
outputs = layers.Hashing(num_bins=100)(inputs)
model = models.Model(inputs=inputs, outputs=outputs)
original_output_data = model(input_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "keras_model.keras")
model.save(output_path)
loaded_model = load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model(input_data)
self.assertAllClose(new_output_data, original_output_data)
@parameterized.named_parameters(
(
"list_input",
[1, 2, 3],
[1, 1, 1],
),
(
"list_input_2d",
[[1], [2], [3]],
[[1], [1], [1]],
),
(
"list_input_2d_multiple",
[[1, 2], [2, 3], [3, 4]],
[[1, 1], [1, 1], [1, 1]],
),
(
"list_input_3d",
[[[1], [2]], [[2], [3]], [[3], [4]]],
[[[1], [1]], [[1], [1]], [[1], [1]]],
),
)
def test_hash_list_input(self, input_data, expected):
layer = layers.Hashing(num_bins=2)
out_data = layer(input_data)
self.assertAllEqual(
expected, backend.convert_to_numpy(out_data).tolist()
)
def test_hashing_invalid_num_bins(self):
# Test with `num_bins` set to None
with self.assertRaisesRegex(
ValueError,
"The `num_bins` for `Hashing` cannot be `None` or non-positive",
):
layers.Hashing(num_bins=None)
# Test with `num_bins` set to 0
with self.assertRaisesRegex(
ValueError,
"The `num_bins` for `Hashing` cannot be `None` or non-positive",
):
layers.Hashing(num_bins=0)
def test_hashing_invalid_output_mode(self):
# Test with an unsupported `output_mode`
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument `output_mode`. Expected one of",
):
layers.Hashing(num_bins=3, output_mode="unsupported_mode")
def test_hashing_invalid_dtype_for_int_mode(self):
with self.assertRaisesRegex(
ValueError,
'When `output_mode="int"`, `dtype` should be an integer type,',
):
layers.Hashing(num_bins=3, output_mode="int", dtype="float32")
def test_hashing_sparse_with_int_mode(self):
# Test setting `sparse=True` with `output_mode='int'`
with self.assertRaisesRegex(
ValueError, "`sparse` may only be true if `output_mode` is"
):
layers.Hashing(num_bins=3, output_mode="int", sparse=True)
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_string_input_farmhash(self):
# layer = layers.Hashing(num_bins=2)
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[0, 0, 1, 0], [1, 0, 0]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="string")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_input_mask_value(self):
# empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
# omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# empty_mask_output = empty_mask_layer(inp_data)
# omar_mask_output = omar_mask_layer(inp_data)
# # Outputs should be one more than test_hash_ragged_string_input_farmhash
# # (the zeroth bin is now reserved for masks).
# expected_output = [[1, 1, 2, 1], [2, 1, 1]]
# self.assertAllClose(expected_output[0], empty_mask_output[1])
# self.assertAllClose(expected_output[1], empty_mask_output[2])
# # 'omar' should map to 0.
# expected_output = [[0, 1, 2, 1], [2, 1, 1]]
# self.assertAllClose(expected_output[0], omar_mask_output[0])
# self.assertAllClose(expected_output[1], omar_mask_output[1])
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_int_input_farmhash(self):
# layer = layers.Hashing(num_bins=3)
# inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype="int64")
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[1, 0, 0, 2], [1, 0, 1]]
# self.assertAllEqual(expected_output[0], out_data[0])
# self.assertAllEqual(expected_output[1], out_data[1])
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="int64")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_string_input_siphash(self):
# layer = layers.Hashing(num_bins=2, salt=[133, 137])
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# out_data = layer(inp_data)
# # Same hashed output as test_hash_dense_input_siphash
# expected_output = [[0, 1, 0, 1], [0, 0, 1]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="string")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
# out_data = layer_2(inp_data)
# expected_output = [[1, 0, 1, 0], [1, 1, 0]]
# self.assertAllEqual(expected_output, out_data)
# out_t = layer_2(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_int_input_siphash(self):
# layer = layers.Hashing(num_bins=3, salt=[133, 137])
# inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype="int64")
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[1, 1, 0, 1], [2, 1, 1]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="int64")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/mel_spectrogram_test.py | keras/src/layers/preprocessing/mel_spectrogram_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MelSpectrogramTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_mel_spectrogram_basics(self):
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(2, 16000),
expected_output_shape=(2, 80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(16000,),
expected_output_shape=(80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
[
((2, 16000), 80, 128, 2048, 8000, False),
((16000,), 80, 128, 2048, 8000, False),
((2, 16001), 80, 128, 2048, 16000, False),
((16001,), 80, 128, 2048, 8000, False),
((2, 8000), 128, 64, 512, 32000, False),
((8000,), 128, 64, 512, 32000, False),
((2, 8000), 128, 64, 512, 32000, True),
((8000,), 128, 64, 512, 32000, True),
]
)
def test_output_shape(
self,
input_shape,
num_mel_bins,
sequence_stride,
fft_length,
sampling_rate,
all_zero,
):
if all_zero:
audios = np.zeros(input_shape)
else:
audios = np.random.random(input_shape)
out = layers.MelSpectrogram(
num_mel_bins=num_mel_bins,
sequence_stride=sequence_stride,
fft_length=fft_length,
sampling_rate=sampling_rate,
)(audios)
if len(input_shape) == 1:
ref_shape = (
num_mel_bins,
(input_shape[0] + sequence_stride + 1) // sequence_stride,
)
else:
ref_shape = (
input_shape[0],
num_mel_bins,
(input_shape[1] + sequence_stride + 1) // sequence_stride,
)
self.assertEqual(tuple(out.shape), ref_shape)
def test_tf_data_compatibility(self):
input_shape = (2, 16000)
output_shape = (2, 80, 126)
layer = layers.MelSpectrogram(
num_mel_bins=80,
sampling_rate=8000,
sequence_stride=128,
fft_length=2048,
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/data_layer.py | keras/src/layers/preprocessing/data_layer.py | import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import jax_utils
from keras.src.utils import tracking
class DataLayer(Layer):
"""Layer designed for safe use in `tf.data` or `grain` pipeline.
This layer overrides the `__call__` method to ensure that the correct
backend is used and that computation is performed on the CPU.
The `call()` method in subclasses should use `self.backend` ops. If
randomness is needed, define both `seed` and `generator` in `__init__` and
retrieve the running seed using `self._get_seed_generator()`. If the layer
has weights in `__init__` or `build()`, use `convert_weight()` to ensure
they are in the correct backend.
**Note:** This layer and its subclasses only support a single input tensor.
Examples:
**Custom `DataLayer` subclass:**
```python
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.random import SeedGenerator
class BiasedRandomRGBToHSVLayer(DataLayer):
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
self.probability_bias = ops.convert_to_tensor(0.01)
self.seed = seed
self.generator = SeedGenerator(seed)
def call(self, inputs):
images_shape = self.backend.shape(inputs)
batch_size = 1 if len(images_shape) == 3 else images_shape[0]
seed = self._get_seed_generator(self.backend._backend)
probability = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
probability = self.backend.numpy.add(
probability, self.convert_weight(self.probability_bias)
)
hsv_images = self.backend.image.rgb_to_hsv(inputs)
return self.backend.numpy.where(
probability[:, None, None, None] > 0.5,
hsv_images,
inputs,
)
def compute_output_shape(self, input_shape):
return input_shape
```
**Using as a regular Keras layer:**
```python
import numpy as np
x = np.random.uniform(size=(1, 16, 16, 3)).astype("float32")
print(BiasedRandomRGBToHSVLayer()(x).shape) # (1, 16, 16, 3)
```
**Using in a `tf.data` pipeline:**
```python
import tensorflow as tf
tf_ds = tf.data.Dataset.from_tensors(x)
tf_ds = tf_ds.map(BiasedRandomRGBToHSVLayer())
print([x.shape for x in tf_ds]) # [(1, 16, 16, 3)]
```
**Using in a `grain` pipeline:**
```python
import grain
grain_ds = grain.MapDataset.source([x])
grain_ds = grain_ds.map(BiasedRandomRGBToHSVLayer())
print([x.shape for x in grain_ds]) # [(1, 16, 16, 3)]
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
sample_input = tree.flatten(inputs)[0]
if (
not isinstance(sample_input, keras.KerasTensor)
and backend_utils.in_tf_graph()
and not jax_utils.is_in_jax_tracing_scope(sample_input)
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
elif (
not isinstance(sample_input, keras.KerasTensor)
and backend_utils.in_grain_data_pipeline()
):
# We're in a Grain data pipeline. Force computation and data
# placement to CPU.
with keras.src.backend.device_scope("cpu"):
return super().__call__(inputs, **kwargs)
else:
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if not hasattr(self, "seed") or not hasattr(self, "generator"):
raise ValueError(
"The `seed` and `generator` variable must be set in the "
"`__init__` method before calling `_get_seed_generator()`."
)
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
def convert_weight(self, weight):
"""Convert the weight if it is from the a different backend."""
if self.backend.name == keras.backend.backend():
return weight
else:
weight = keras.ops.convert_to_numpy(weight)
return self.backend.convert_to_tensor(weight)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/string_lookup_test.py | keras/src/layers/preprocessing/string_lookup_test.py | import os
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
self.assertEqual(layer.get_config()["vocabulary"], ["a", "b", "c"])
def test_vocabulary_file(self):
temp_dir = self.get_temp_dir()
vocab_path = os.path.join(temp_dir, "vocab.txt")
with open(vocab_path, "w") as file:
file.write("a\nb\nc\n")
layer = layers.StringLookup(
output_mode="int",
vocabulary=vocab_path,
oov_token="[OOV]",
mask_token="[MASK]",
name="index",
)
self.assertEqual(
[str(v) for v in layer.get_vocabulary()],
["[MASK]", "[OOV]", "a", "b", "c"],
)
self.assertIsNone(layer.get_config().get("vocabulary", None))
# Make sure vocabulary comes from the archive, not the original file.
os.remove(vocab_path)
model = models.Sequential([layer])
model_path = os.path.join(temp_dir, "test_model.keras")
model.save(model_path)
reloaded_model = saving.load_model(model_path)
reloaded_layer = reloaded_model.get_layer("index")
self.assertEqual(
[str(v) for v in reloaded_layer.get_vocabulary()],
["[MASK]", "[OOV]", "a", "b", "c"],
)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
@pytest.mark.skipif(backend.backend() != "torch", reason="Only torch")
def test_torch_backend_compatibility(self):
import torch
# Forward lookup: String -> number
forward_lookup = layers.StringLookup(
vocabulary=["a", "b", "c"], oov_token="[OOV]"
)
input_data_str = ["a", "b", "[OOV]", "d"]
output_numeric = forward_lookup(input_data_str)
# assert instance of output is torch.Tensor
self.assertIsInstance(output_numeric, torch.Tensor)
expected_numeric = torch.tensor([1, 2, 0, 0])
self.assertAllClose(output_numeric.cpu(), expected_numeric)
oov = "[OOV]"
# Inverse lookup: Number -> string
inverse_lookup = layers.StringLookup(
vocabulary=["a", "b", "c"], oov_token=oov, invert=True
)
input_data_int = torch.tensor([1, 2, 0], dtype=torch.int64)
output_string = inverse_lookup(input_data_int)
# Assert that the output is a list
# See : https://docs.pytorch.org/text/stable/_modules/torchtext/vocab/vocab.html#Vocab.lookup_tokens
# The torch equivalent implementation of this returns a list of strings
self.assertIsInstance(output_string, list)
expected_string = ["a", "b", "[OOV]"]
self.assertEqual(output_string, expected_string)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/data_layer_test.py | keras/src/layers/preprocessing/data_layer_test.py | import grain
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import testing
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.random import SeedGenerator
class RandomRGBToHSVLayer(DataLayer):
def __init__(self, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = SeedGenerator(seed)
def call(self, inputs):
images_shape = self.backend.shape(inputs)
batch_size = 1 if len(images_shape) == 3 else images_shape[0]
seed = self._get_seed_generator(self.backend._backend)
probability = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
hsv_images = self.backend.image.rgb_to_hsv(
inputs, data_format=self.data_format
)
return self.backend.numpy.where(
probability[:, None, None, None] > 0.5, hsv_images, inputs
)
def compute_output_shape(self, input_shape):
return input_shape
class DataLayerTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
RandomRGBToHSVLayer,
init_kwargs={
"seed": 1337,
"data_format": "channels_last",
},
input_shape=(1, 2, 2, 3),
supports_masking=False,
expected_output_shape=(1, 2, 2, 3),
)
self.run_layer_test(
RandomRGBToHSVLayer,
init_kwargs={
"seed": 1337,
"data_format": "channels_first",
},
input_shape=(1, 3, 2, 2),
supports_masking=False,
expected_output_shape=(1, 3, 2, 2),
)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3)).astype("float32")
else:
input_data = np.random.random((2, 3, 8, 8)).astype("float32")
layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
self.assertDType(output, "float32")
self.assertEqual(list(output.shape), list(input_data.shape))
def test_grain_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3)).astype("float32")
else:
input_data = np.random.random((2, 3, 8, 8)).astype("float32")
layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)
ds = grain.MapDataset.source(input_data).batch(2).map(layer)
for output in ds[:1]:
self.assertDType(output, "float32")
self.assertEqual(list(output.shape), list(input_data.shape))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/rescaling.py | keras/src/layers/preprocessing/rescaling.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Rescaling")
class Rescaling(DataLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference. Inputs can be
of integer or floating point dtype, and by default the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, scale, offset=0.0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.offset = offset
self.supports_masking = True
def call(self, inputs):
dtype = self.compute_dtype
scale = self.backend.cast(self.scale, dtype)
offset = self.backend.cast(self.offset, dtype)
scale_shape = self.backend.core.shape(scale)
if (
len(scale_shape) > 0
and backend.image_data_format() == "channels_first"
):
scale = self.backend.numpy.reshape(
scale, scale_shape + (1,) * (3 - len(scale_shape))
)
return self.backend.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
# `scale` and `offset` might be numpy array.
"scale": serialization_lib.serialize_keras_object(self.scale),
"offset": serialization_lib.serialize_keras_object(self.offset),
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
config["scale"] = serialization_lib.deserialize_keras_object(
config["scale"], custom_objects=custom_objects
)
config["offset"] = serialization_lib.deserialize_keras_object(
config["offset"], custom_objects=custom_objects
)
return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/auto_contrast_test.py | keras/src/layers/preprocessing/image_preprocessing/auto_contrast_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase):
    """Unit tests for `layers.AutoContrast`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.AutoContrast,
            init_kwargs={
                "value_range": (20, 200),
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_constant_channels_dont_get_nanned(self):
        # A constant channel has max == min; auto-contrast must not divide
        # by zero and produce NaNs in that case.
        img = np.array([1, 1], dtype="float32")
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=0)

        layer = layers.AutoContrast(value_range=(0, 255))
        ys = layer(img)

        self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
        # Fixed: the original test asserted the identical condition twice
        # (copy-paste). Assert the absence of NaNs, which is what the test
        # name actually promises.
        self.assertFalse(np.any(np.isnan(ops.convert_to_numpy(ys[0]))))

    def test_auto_contrast_expands_value_range(self):
        # Inputs spanning [0, 128] must be stretched to the full [0, 255].
        img = np.array([0, 128], dtype="float32")
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=0)

        layer = layers.AutoContrast(value_range=(0, 255))
        ys = layer(img)

        self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
        self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))

    def test_auto_contrast_different_values_per_channel(self):
        # Contrast must be stretched independently per channel.
        img = np.array(
            [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
            dtype="float32",
        )
        img = np.expand_dims(img, axis=0)

        layer = layers.AutoContrast(value_range=(0, 255))
        ys = layer(img)

        self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
        self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))

        self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
        self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))

        self.assertAllClose(
            ys,
            [
                [
                    [[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
                    [[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
                ]
            ],
        )

    def test_auto_contrast_expands_value_range_uint8(self):
        # Same stretch as above but for integer (uint8) inputs.
        img = np.array([0, 128], dtype="uint8")
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=0)

        layer = layers.AutoContrast(value_range=(0, 255))
        ys = layer(img)

        self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
        self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))

    def test_auto_contrast_properly_converts_value_range(self):
        # With value_range (0, 1), the stretch targets [0, 1] not [0, 255].
        img = np.array([0, 0.5], dtype="float32")
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=0)

        layer = layers.AutoContrast(value_range=(0, 1))
        ys = layer(img)
        self.assertAllClose(
            ops.convert_to_numpy(ys[0]), np.array([[[0.0]], [[1]]])
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration_test.py | keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomColorDegenerationTest(testing.TestCase):
    """Unit tests for `layers.RandomColorDegeneration`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.RandomColorDegeneration,
            init_kwargs={
                "factor": 0.75,
                "value_range": (0, 1),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_color_degeneration_value_range(self):
        # Output must stay inside the declared value range.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomColorDegeneration(0.2, value_range=(0, 1))
        adjusted_image = layer(image)

        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))

    def test_random_color_degeneration_no_op(self):
        # With training=False the layer must be a no-op.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))
        layer = layers.RandomColorDegeneration((0.5, 0.5))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)

    def test_random_color_degeneration_factor_zero(self):
        # A zero factor is equivalent to the identity transform.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))
        layer = layers.RandomColorDegeneration(factor=(0.0, 0.0))
        result = layer(inputs)
        self.assertAllClose(inputs, result, atol=1e-3, rtol=1e-5)

    def test_random_color_degeneration_randomness(self):
        # Fixed: removed a dead `[:5]` slice on the first axis (length 3),
        # which was a no-op.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomColorDegeneration(0.2)
        adjusted_images = layer(image)

        self.assertNotAllClose(adjusted_images, image)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomColorDegeneration(
            factor=0.5, data_format=data_format, seed=1337
        )

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_posterization_test.py | keras/src/layers/preprocessing/image_preprocessing/random_posterization_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomPosterizationTest(testing.TestCase):
    """Unit tests for `layers.RandomPosterization`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.RandomPosterization,
            init_kwargs={
                "factor": 1,
                "value_range": (20, 200),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_posterization_inference(self):
        # With training=False the layer must be a no-op.
        seed = 3481
        layer = layers.RandomPosterization(1, [0, 255])
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_posterization_basic(self):
        # Golden-value check: with a single bit kept, values below 128
        # collapse to 0 and the rest to 128 (per the expected array below).
        seed = 3481
        layer = layers.RandomPosterization(
            1, [0, 255], data_format="channels_last", seed=seed
        )
        np.random.seed(seed)
        inputs = np.asarray(
            [[[128.0, 235.0, 87.0], [12.0, 1.0, 23.0], [24.0, 18.0, 121.0]]]
        )
        output = layer(inputs)
        expected_output = np.asarray(
            [[[128.0, 128.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]
        )
        self.assertAllClose(expected_output, output)

    def test_random_posterization_value_range_0_to_1(self):
        # Output must stay inside the declared value range.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomPosterization(1, [0, 1.0])
        adjusted_image = layer(image)

        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))

    def test_random_posterization_value_range_0_to_255(self):
        # Same as above but for the [0, 255] range.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=255)
        layer = layers.RandomPosterization(1, [0, 255])
        adjusted_image = layer(image)

        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 255))

    def test_random_posterization_randomness(self):
        # The transform should actually modify the input.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomPosterization(1, [0, 255])
        adjusted_images = layer(image)

        self.assertNotAllClose(adjusted_images, image)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomPosterization(1, [0, 255])

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py | keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class RandomGrayscaleTest(testing.TestCase):
    """Unit tests for `layers.RandomGrayscale`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Contract checks for both data formats; the channel count is
        # preserved (grayscale is replicated across 3 channels).
        self.run_layer_test(
            layers.RandomGrayscale,
            init_kwargs={
                "factor": 0.5,
                "data_format": "channels_last",
            },
            input_shape=(1, 2, 2, 3),
            supports_masking=False,
            expected_output_shape=(1, 2, 2, 3),
        )
        self.run_layer_test(
            layers.RandomGrayscale,
            init_kwargs={
                "factor": 0.5,
                "data_format": "channels_first",
            },
            input_shape=(1, 3, 2, 2),
            supports_masking=False,
            expected_output_shape=(1, 3, 2, 2),
        )

    @parameterized.named_parameters(
        ("channels_last", "channels_last"), ("channels_first", "channels_first")
    )
    def test_grayscale_conversion(self, data_format):
        # factor=1.0 forces conversion, so all three channels of every
        # output image must be equal.
        if data_format == "channels_last":
            xs = np.random.uniform(0, 255, size=(2, 4, 4, 3)).astype(np.float32)
            layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
            transformed = ops.convert_to_numpy(layer(xs))
            self.assertEqual(transformed.shape[-1], 3)
            for img in transformed:
                r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
                self.assertTrue(np.allclose(r, g) and np.allclose(g, b))
        else:
            xs = np.random.uniform(0, 255, size=(2, 3, 4, 4)).astype(np.float32)
            layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
            transformed = ops.convert_to_numpy(layer(xs))
            self.assertEqual(transformed.shape[1], 3)
            for img in transformed:
                r, g, b = img[0], img[1], img[2]
                self.assertTrue(np.allclose(r, g) and np.allclose(g, b))

    def test_invalid_factor(self):
        # `factor` is a probability and must lie in [0, 1].
        with self.assertRaises(ValueError):
            layers.RandomGrayscale(factor=-0.1)

        with self.assertRaises(ValueError):
            layers.RandomGrayscale(factor=1.1)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline and keep
        # the input shape.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3)) * 255
        else:
            input_data = np.random.random((2, 3, 8, 8)) * 255
        layer = layers.RandomGrayscale(factor=0.5, data_format=data_format)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output_array = output.numpy()
            self.assertEqual(output_array.shape, input_data.shape)

    def test_grayscale_with_single_color_image(self):
        # A uniform-color image must stay uniform after conversion, for
        # both batched and unbatched inputs in both data formats.
        test_cases = [
            # batched inputs
            (np.full((1, 4, 4, 3), 128, dtype=np.float32), "channels_last"),
            (np.full((1, 3, 4, 4), 128, dtype=np.float32), "channels_first"),
            # unbatched inputs
            (np.full((4, 4, 3), 128, dtype=np.float32), "channels_last"),
            (np.full((3, 4, 4), 128, dtype=np.float32), "channels_first"),
        ]

        for xs, data_format in test_cases:
            layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
            transformed = ops.convert_to_numpy(layer(xs))

            # Determine if the input was batched
            is_batched = len(xs.shape) == 4

            # If batched, select the first image from the batch for inspection.
            # Otherwise, use the transformed image directly.
            # `image_to_inspect` will always be a 3D tensor.
            if is_batched:
                image_to_inspect = transformed[0]
            else:
                image_to_inspect = transformed

            if data_format == "channels_last":
                # image_to_inspect has shape (H, W, C),
                # get the first channel [:, :, 0]
                channel_data = image_to_inspect[:, :, 0]
            else:  # data_format == "channels_first"
                # image_to_inspect has shape (C, H, W),
                # get the first channel [0, :, :]
                channel_data = image_to_inspect[0, :, :]

            unique_vals = np.unique(channel_data)
            self.assertEqual(len(unique_vals), 1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_invert.py | keras/src/layers/preprocessing/image_preprocessing/random_invert.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomInvert")
class RandomInvert(BaseImagePreprocessingLayer):
    """Preprocessing layer for random inversion of image colors.

    This layer randomly inverts the colors of input images with a specified
    probability range. When applied, each image has a chance of having its
    colors inverted, where the pixel values are transformed to their
    complementary values. Images that are not selected for inversion
    remain unchanged.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: A single float or a tuple of two floats.
            `factor` controls the probability of inverting the image colors.
            If a tuple is provided, the value is sampled between the two values
            for each image, where `factor[0]` is the minimum and `factor[1]` is
            the maximum probability. If a single float is provided, a value
            between `0.0` and the provided float is sampled.
            Defaults to `(0, 1)`.
        value_range: a tuple or a list of two elements. The first value
            represents the lower bound for values in passed images, the second
            represents the upper bound. Images passed to the layer should have
            values within `value_range`. Defaults to `(0, 255)`.
        seed: Integer. Used to create a random seed.
    """

    # Probability handling is implemented locally, not via the base layer's
    # generic factor machinery.
    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)

    def __init__(
        self,
        factor=1.0,
        value_range=(0, 255),
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        # Normalizes `factor` into a `(lower, upper)` probability range.
        self._set_factor(factor)
        self.value_range = value_range
        self.seed = seed
        self.generator = self.backend.random.SeedGenerator(seed)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample, per image, whether its colors should be inverted."""
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        seed = seed or self._get_seed_generator(self.backend._backend)
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        # Unbatched (rank-3) inputs are treated as a batch of one.
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )
        # Each image draws its own inversion probability from
        # `[factor[0], factor[1]]` ...
        invert_probability = self.backend.random.uniform(
            shape=(batch_size,),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        # ... and a second uniform draw against that probability acts as a
        # per-image Bernoulli trial.
        random_threshold = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0,
            maxval=1,
            seed=seed,
        )
        apply_inversion = random_threshold < invert_probability
        return {"apply_inversion": apply_inversion}

    def transform_images(self, images, transformation, training=True):
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            apply_inversion = transformation["apply_inversion"]
            # NOTE(review): inversion is computed as `high - images`; for a
            # `value_range` whose lower bound is not 0, the complementary
            # value would be `low + high - images` — confirm intended.
            # NOTE(review): the `[:, None, None, None]` mask reshape assumes
            # rank-4 (batched) images — confirm behavior for rank-3 inputs.
            return self.backend.numpy.where(
                apply_inversion[:, None, None, None],
                self.value_range[1] - images,
                images,
            )
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Color inversion does not affect labels.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        # Pixel values change but geometry does not; boxes pass through.
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Segmentation masks pass through unchanged.
        return segmentation_masks

    def compute_output_shape(self, input_shape):
        # Elementwise transform; the shape is preserved.
        return input_shape

    def get_config(self):
        """Return the constructor arguments needed to re-create the layer."""
        config = {
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/cut_mix_test.py | keras/src/layers/preprocessing/image_preprocessing/cut_mix_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CutMixTest(testing.TestCase):
    """Unit tests for `layers.CutMix`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.CutMix,
            init_kwargs={
                "factor": 1.0,
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
            # StatelessRandomGammaV3 is not supported on XLA_GPU_JIT
            run_training_check=not testing.tensorflow_uses_gpu(),
        )

    def test_cut_mix_inference(self):
        # With training=False the layer must be a no-op.
        seed = 3481
        layer = layers.CutMix()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_cut_mix_basic(self):
        # Calls `transform_images` directly with a hand-built transformation
        # so the expected output is fully deterministic. The identity
        # `permutation_order` ([0, 1]) mixes each image with itself, so the
        # output equals the input regardless of the mask.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image1 = np.ones((2, 2, 1))
            image2 = np.zeros((2, 2, 1))
            inputs = np.asarray([image1, image2])
            expected_output = np.array(
                [
                    [[[1.0], [1.0]], [[1.0], [1.0]]],
                    [[[0.0], [0.0]], [[0.0], [0.0]]],
                ]
            )
        else:
            image1 = np.ones((1, 2, 2))
            image2 = np.zeros((1, 2, 2))
            inputs = np.asarray([image1, image2])
            expected_output = np.asarray(
                [
                    [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]],
                    [[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
                ]
            )

        layer = layers.CutMix(data_format=data_format)

        transformation = {
            "batch_masks": np.asarray(
                [
                    [[[False], [True]], [[False], [False]]],
                    [[[False], [False]], [[True], [False]]],
                ]
            ),
            "mix_weight": np.asarray([[[[0.7826548]]], [[[0.8133545]]]]),
            "permutation_order": np.asarray([0, 1]),
        }
        output = layer.transform_images(inputs, transformation)

        self.assertAllClose(expected_output, output)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.CutMix(data_format=data_format)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box_test.py | keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box_test.py | import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class MaxNumBoundingBoxesTest(testing.TestCase):
    """Unit tests for `layers.MaxNumBoundingBoxes`."""

    def test_max_num_bounding_boxes_basics(self):
        # Standard layer contract checks; the layer has no weights or
        # random state and images pass through unchanged.
        self.run_layer_test(
            layers.MaxNumBoundingBoxes,
            init_kwargs={
                "max_number": 40,
                "fill_value": -1,
            },
            input_shape=(12, 12, 3),
            expected_output_shape=(12, 12, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
        )

    def test_output_shapes(self):
        # Two input boxes must be padded up to `max_number` (40) rows.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),  # Example boxes (normalized)
            "labels": np.array([1, 2]),  # Dummy labels
        }
        layer = layers.MaxNumBoundingBoxes(
            max_number=40, bounding_box_format="xyxy"
        )

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        output = layer(input_data)
        self.assertAllEqual(output["bounding_boxes"]["boxes"].shape, (40, 4))
        self.assertAllEqual(output["bounding_boxes"]["labels"].shape, (40,))

    def test_output_shapes_with_tf_data(self):
        # Same padding check, but applied per element inside a tf.data
        # pipeline and then batched.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),  # Example boxes (normalized)
            "labels": np.array([[1, 2]]),  # Dummy labels
        }
        layer = layers.MaxNumBoundingBoxes(
            max_number=40, bounding_box_format="xyxy"
        )

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        ds = ds.map(layer)
        ds = ds.batch(1)
        output = next(iter(ds))
        self.assertAllEqual(output["bounding_boxes"]["boxes"].shape, (1, 40, 4))
        self.assertAllEqual(output["bounding_boxes"]["labels"].shape, (1, 40))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_saturation_test.py | keras/src/layers/preprocessing/image_preprocessing/random_saturation_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomSaturationTest(testing.TestCase):
    """Unit tests for `layers.RandomSaturation`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.RandomSaturation,
            init_kwargs={
                "factor": 0.75,
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_saturation_value_range(self):
        # Output must stay inside [0, 1] for inputs in [0, 1].
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomSaturation(0.2)
        adjusted_image = layer(image)

        self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
        self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))

    def test_random_saturation_no_op(self):
        # With training=False the layer must be a no-op.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))
        layer = layers.RandomSaturation((0.5, 0.5))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)

    def test_random_saturation_full_grayscale(self):
        # factor=0 removes all saturation, so every channel must be equal.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))
        layer = layers.RandomSaturation(factor=(0.0, 0.0))
        result = layer(inputs)

        if data_format == "channels_last":
            self.assertAllClose(result[..., 0], result[..., 1])
            self.assertAllClose(result[..., 1], result[..., 2])
        else:
            self.assertAllClose(result[:, 0, :, :], result[:, 1, :, :])
            self.assertAllClose(result[:, 1, :, :], result[:, 2, :, :])

    def test_random_saturation_full_saturation(self):
        # factor=1 pushes saturation to the top of the layer's value range.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.random.random((2, 8, 8, 3))
        else:
            inputs = np.random.random((2, 3, 8, 8))
        layer = layers.RandomSaturation(factor=(1.0, 1.0))
        result = layer(inputs)

        hsv = backend.image.rgb_to_hsv(result)
        s_channel = hsv[..., 1]

        self.assertAllClose(
            keras.ops.numpy.max(s_channel), layer.value_range[1]
        )

    def test_random_saturation_randomness(self):
        # Fixed: removed a dead `[:5]` slice on the first axis (length 3),
        # which was a no-op.
        image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
        layer = layers.RandomSaturation(0.2)
        adjusted_images = layer(image)

        self.assertNotAllClose(adjusted_images, image)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomSaturation(
            factor=0.5, data_format=data_format, seed=1337
        )

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_shear_test.py | keras/src/layers/preprocessing/image_preprocessing/random_shear_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.utils import backend_utils
class RandomShearTest(testing.TestCase):
    """Unit tests for `layers.RandomShear`."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks (shape, masking, serialization).
        self.run_layer_test(
            layers.RandomShear,
            init_kwargs={
                "x_factor": (0.5, 1),
                "y_factor": (0.5, 1),
                "interpolation": "bilinear",
                "fill_mode": "reflect",
                "data_format": "channels_last",
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_posterization_inference(self):
        # With training=False the layer must be a no-op.
        # NOTE(review): the test name says "posterization" — looks like a
        # copy-paste from another test file; consider renaming.
        seed = 3481
        layer = layers.RandomShear(1, 1)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_shear_pixel_level(self):
        # A distinctive center pixel must move (i.e. change value at its
        # original location) after shearing.
        image = np.zeros((1, 5, 5, 3))
        image[0, 1:4, 1:4, :] = 1.0
        image[0, 2, 2, :] = [0.0, 1.0, 0.0]
        image = keras.ops.convert_to_tensor(image, dtype="float32")

        data_format = backend.config.image_data_format()
        if data_format == "channels_first":
            image = keras.ops.transpose(image, (0, 3, 1, 2))

        shear_layer = layers.RandomShear(
            x_factor=(0.2, 0.3),
            y_factor=(0.2, 0.3),
            interpolation="bilinear",
            fill_mode="constant",
            fill_value=0.0,
            seed=42,
            data_format=data_format,
        )

        sheared_image = shear_layer(image)

        if data_format == "channels_first":
            sheared_image = keras.ops.transpose(sheared_image, (0, 2, 3, 1))

        original_pixel = image[0, 2, 2, :]
        sheared_pixel = sheared_image[0, 2, 2, :]
        self.assertNotAllClose(original_pixel, sheared_pixel)

    def test_tf_data_compatibility(self):
        # The layer must be traceable inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomShear(1, 1)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output.numpy()

    @parameterized.named_parameters(
        (
            "with_x_shift",
            [[1.0, 0.0]],
            [[[0.0, 1.0, 3.2, 3.0], [1.2, 4.0, 4.8, 6.0]]],
        ),
        (
            "with_y_shift",
            [[0.0, 1.0]],
            [[[2.0, 0.0, 4.0, 0.5], [6.0, 0.0, 8.0, 0.0]]],
        ),
        (
            "with_xy_shift",
            [[1.0, 1.0]],
            [[[0.0, 0.0, 3.2, 3.5], [1.2, 0.0, 4.8, 4.5]]],
        ),
    )
    def test_random_shear_bounding_boxes(self, translation, expected_boxes):
        # Golden-value check of `transform_bounding_boxes` with a
        # hand-built shear transformation.
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        # NOTE(review): `boxes` is unbatched (2, 4) while `labels` is
        # batched (1, 2) — confirm the intended shapes.
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        layer = layers.RandomShear(
            x_factor=0.5,
            y_factor=0.5,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        transformation = {
            "shear_factor": backend_utils.convert_tf_tensor(
                np.array(translation)
            ),
            "input_shape": image_shape,
        }
        output = layer.transform_bounding_boxes(
            input_data["bounding_boxes"],
            transformation=transformation,
            training=True,
        )

        self.assertAllClose(output["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "with_x_shift",
            [[1.0, 0.0]],
            [[[0.0, 1.0, 3.2, 3.0], [1.2, 4.0, 4.8, 6.0]]],
        ),
        (
            "with_y_shift",
            [[0.0, 1.0]],
            [[[2.0, 0.0, 4.0, 0.5], [6.0, 0.0, 8.0, 0.0]]],
        ),
        (
            "with_xy_shift",
            [[1.0, 1.0]],
            [[[0.0, 0.0, 3.2, 3.5], [1.2, 0.0, 4.8, 4.5]]],
        ),
    )
    def test_random_shear_tf_data_bounding_boxes(
        self, translation, expected_boxes
    ):
        # Same golden-value check, but applied inside a tf.data pipeline.
        data_format = backend.config.image_data_format()
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}

        ds = tf_data.Dataset.from_tensor_slices(input_data)
        layer = layers.RandomShear(
            x_factor=0.5,
            y_factor=0.5,
            data_format=data_format,
            seed=42,
            bounding_box_format="xyxy",
        )

        transformation = {
            "shear_factor": np.array(translation),
            "input_shape": image_shape,
        }

        ds = ds.map(
            lambda x: layer.transform_bounding_boxes(
                x["bounding_boxes"],
                transformation=transformation,
                training=True,
            )
        )

        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py | keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
@keras_export("keras.layers.RandomGaussianBlur")
class RandomGaussianBlur(BaseImagePreprocessingLayer):
"""Applies random Gaussian blur to images for data augmentation.
This layer performs a Gaussian blur operation on input images with a
randomly selected degree of blurring, controlled by the `factor` and
`sigma` arguments.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the extent to which the image hue is impacted.
`factor=0.0` makes this layer perform a no-op operation,
while a value of `1.0` performs the most aggressive
blurring available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. Default is 1.0.
kernel_size: Integer. Size of the Gaussian kernel used for blurring.
Must be an odd integer. Default is 3.
sigma: Float or tuple of two floats. Standard deviation of the Gaussian
kernel. Controls the intensity of the blur. If a tuple is provided,
a value is sampled between the two for each image. Default is 1.0.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
    def __init__(
        self,
        factor=1.0,
        kernel_size=3,
        sigma=1.0,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        # `factor` is the per-image probability range of applying the blur.
        self._set_factor(factor)
        # NOTE(review): `kernel_size` is normalized to a 2-tuple and passed
        # directly to `gaussian_blur` in `transform_images`, so a tuple
        # argument is per-axis sizes rather than a sampling range — the
        # class docstring ("Must be an odd integer") understates this;
        # confirm intended.
        self.kernel_size = self._set_kernel_size(kernel_size, "kernel_size")
        # `(lower, upper)` range the blur sigma is sampled from.
        self.sigma = self._set_factor_by_name(sigma, "sigma")
        self.value_range = value_range
        self.seed = seed
        self.generator = SeedGenerator(seed)
def _set_kernel_size(self, factor, name):
error_msg = f"{name} must be an odd number. Received: {name}={factor}"
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
f"Received: {name}={factor}"
)
raise ValueError(error_msg)
if (factor[0] % 2 == 0) or (factor[1] % 2 == 0):
raise ValueError(error_msg)
lower, upper = factor
elif isinstance(factor, (int, float)):
if factor % 2 == 0:
raise ValueError(error_msg)
lower, upper = factor, factor
else:
raise ValueError(error_msg)
return lower, upper
def _set_factor_by_name(self, factor, name):
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
"in the range "
f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
f"Received: factor={factor}"
)
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(error_msg)
if (
factor[0] > self._FACTOR_BOUNDS[1]
or factor[1] < self._FACTOR_BOUNDS[0]
):
raise ValueError(error_msg)
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
if (
factor < self._FACTOR_BOUNDS[0]
or factor > self._FACTOR_BOUNDS[1]
):
raise ValueError(error_msg)
factor = abs(factor)
lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
else:
raise ValueError(error_msg)
return lower, upper
    def get_random_transformation(self, data, training=True, seed=None):
        """Sample per-image blur decisions and a shared blur strength.

        Returns `None` when not training; otherwise a dict with:
          - "should_apply_blur": boolean tensor of shape `(batch,)`
            deciding, per image, whether the blur is applied.
          - "blur_factor": shape-`(2,)` sigma pair drawn from
            `[sigma[0], sigma[1]]`, shared by all images in the batch.
        """
        if not training:
            return None
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            # Unbatched image: treat it as a batch of one.
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )
        seed = seed or self._get_seed_generator(self.backend._backend)
        # `factor` bounds the per-image probability of applying the blur;
        # each image is blurred when its threshold falls below it.
        blur_probability = self.backend.random.uniform(
            shape=(batch_size,),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        random_threshold = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0.0,
            maxval=1.0,
            seed=seed,
        )
        should_apply_blur = random_threshold < blur_probability
        # Small epsilon keeps the sampled sigma strictly positive.
        blur_factor = (
            self.backend.random.uniform(
                shape=(2,),
                minval=self.sigma[0],
                maxval=self.sigma[1],
                seed=seed,
                dtype=self.compute_dtype,
            )
            + 1e-6
        )
        return {
            "should_apply_blur": should_apply_blur,
            "blur_factor": blur_factor,
        }
    def transform_images(self, images, transformation=None, training=True):
        """Blur the images selected by the sampled transformation.

        Blurs the whole batch once, then keeps the blurred version only
        where `should_apply_blur` is true, and clips the result back into
        `value_range`. When not training (or without a transformation),
        images pass through unchanged apart from the dtype cast.
        """
        images = self.backend.cast(images, self.compute_dtype)
        if training and transformation is not None:
            blur_factor = transformation["blur_factor"]
            should_apply_blur = transformation["should_apply_blur"]
            blur_images = self.backend.image.gaussian_blur(
                images,
                kernel_size=self.kernel_size,
                sigma=blur_factor,
                data_format=self.data_format,
            )
            # Broadcast the per-image boolean over the H/W/C dimensions.
            # NOTE(review): this indexing assumes rank-4 (batched) input;
            # confirm the rank-3 path is expanded upstream.
            images = self.backend.numpy.where(
                should_apply_blur[:, None, None, None],
                blur_images,
                images,
            )
            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
            images = self.backend.cast(images, dtype=self.compute_dtype)
        return images
    def transform_labels(self, labels, transformation, training=True):
        # Blurring only alters pixel intensities; labels pass through.
        return labels
    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are left untouched: the blur is not applied to them.
        return segmentation_masks
    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        # Blur is geometry-preserving, so boxes are unchanged.
        return bounding_boxes
    def compute_output_shape(self, input_shape):
        # Pixel-wise operation: spatial dims and channels are preserved.
        return input_shape
    def get_config(self):
        """Return the serializable layer configuration."""
        config = super().get_config()
        config.update(
            {
                # Note: `factor`, `kernel_size` and `sigma` are stored in
                # their normalized (lower, upper) tuple form.
                "factor": self.factor,
                "kernel_size": self.kernel_size,
                "sigma": self.sigma,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_contrast.py | keras/src/layers/preprocessing/image_preprocessing/random_contrast.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomContrast")
class RandomContrast(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly adjusts contrast during training.

    This layer will randomly adjust the contrast of an image or images
    by a random factor. Contrast is adjusted independently
    for each channel of each image during training.

    For each channel, this layer computes the mean of the image pixels in the
    channel and then adjusts each component `x` of each pixel to
    `(x - mean) * contrast_factor + mean`.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    in integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Args:
        factor: a positive float represented as fraction of value, or a tuple
            of size 2 representing lower and upper bound.
            When represented as a single float, lower = upper.
            The contrast factor will be randomly picked between
            `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
            the output will be `(x - mean) * factor + mean`
            where `mean` is the mean value of the channel.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`. This is
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
    """

    _FACTOR_BOUNDS = (0, 1)

    def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
        super().__init__(**kwargs)
        # Normalized by the base class into a (lower, upper) tuple.
        self._set_factor(factor)
        self.value_range = value_range
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one contrast factor per image, shared across H/W/C."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            factor_shape = (1, 1, 1)
        elif rank == 4:
            # Keep only the batch dim. This will ensure to have same
            # adjustment within one image, but different across the images.
            factor_shape = [images_shape[0], 1, 1, 1]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )
        if not training:
            # Never applied (`transform_images` short-circuits when not
            # training); returned only to keep the structure consistent.
            return {"contrast_factor": self.backend.numpy.zeros(factor_shape)}
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        factor = self.backend.random.uniform(
            shape=factor_shape,
            minval=1.0 - self.factor[0],
            maxval=1.0 + self.factor[1],
            seed=seed,
            dtype=self.compute_dtype,
        )
        return {"contrast_factor": factor}

    def transform_images(self, images, transformation, training=True):
        """Apply the sampled contrast factor and clip to `value_range`."""
        if training:
            contrast_factor = transformation["contrast_factor"]
            outputs = self._adjust_constrast(images, contrast_factor)
            outputs = self.backend.numpy.clip(
                outputs, self.value_range[0], self.value_range[1]
            )
            # Bug fix: the reshape result used to be discarded, making the
            # statement a no-op. Assign it so the output shape is enforced
            # to match the input shape as intended.
            outputs = self.backend.numpy.reshape(
                outputs, self.backend.shape(images)
            )
            return outputs
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Contrast changes pixel values only; labels pass through.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        # Contrast is geometry-preserving; boxes pass through.
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are categorical; contrast adjustment does not apply.
        return segmentation_masks

    def _adjust_constrast(self, inputs, contrast_factor):
        """Scale `inputs` around their per-channel spatial mean."""
        if self.data_format == "channels_first":
            height_axis = -2
            width_axis = -1
        else:
            height_axis = -3
            width_axis = -2
        # Reduce over height, then width, keeping dims so the mean
        # broadcasts back against `inputs`.
        inp_mean = self.backend.numpy.mean(
            inputs, axis=height_axis, keepdims=True
        )
        inp_mean = self.backend.numpy.mean(
            inp_mean, axis=width_axis, keepdims=True
        )
        outputs = (inputs - inp_mean) * contrast_factor + inp_mean
        return outputs

    def compute_output_shape(self, input_shape):
        # Pixel-wise adjustment: shape is unchanged.
        return input_shape

    def get_config(self):
        """Return the serializable layer configuration."""
        config = {
            # Stored in normalized (lower, upper) form.
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py | keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py | import keras.src.layers.preprocessing.image_preprocessing.random_brightness as random_brightness # noqa: E501
import keras.src.layers.preprocessing.image_preprocessing.random_contrast as random_contrast # noqa: E501
import keras.src.layers.preprocessing.image_preprocessing.random_hue as random_hue # noqa: E501
import keras.src.layers.preprocessing.image_preprocessing.random_saturation as random_saturation # noqa: E501
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandomColorJitter")
class RandomColorJitter(BaseImagePreprocessingLayer):
    """RandomColorJitter class randomly apply brightness, contrast, saturation
    and hue image processing operation sequentially and randomly on the
    input.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        value_range: the range of values the incoming images will have.
            Represented as a two number tuple written [low, high].
            This is typically either `[0, 1]` or `[0, 255]` depending
            on how your preprocessing pipeline is set up.
        brightness_factor: Float or a list/tuple of 2 floats between -1.0
            and 1.0. The factor is used to determine the lower bound and
            upper bound of the brightness adjustment. A float value will
            be chosen randomly between the limits. When -1.0 is chosen,
            the output image will be black, and when 1.0 is chosen, the
            image will be fully white. When only one float is provided,
            eg, 0.2, then -0.2 will be used for lower bound and 0.2 will
            be used for upper bound.
        contrast_factor: a positive float represented as fraction of value,
            or a tuple of size 2 representing lower and upper bound. When
            represented as a single float, lower = upper. The contrast
            factor will be randomly picked between `[1.0 - lower, 1.0 +
            upper]`. For any pixel x in the channel, the output will be
            `(x - mean) * factor + mean` where `mean` is the mean value
            of the channel.
        saturation_factor: A tuple of two floats or a single float. `factor`
            controls the extent to which the image saturation is impacted.
            `factor=0.5` makes this layer perform a no-op operation.
            `factor=0.0` makes the image fully grayscale. `factor=1.0`
            makes the image fully saturated. Values should be between
            `0.0` and `1.0`. If a tuple is used, a `factor` is sampled
            between the two values for every image augmented. If a single
            float is used, a value between `0.0` and the passed float is
            sampled. To ensure the value is always the same, pass a tuple
            with two identical floats: `(0.5, 0.5)`.
        hue_factor: A single float or a tuple of two floats. `factor`
            controls the extent to which the image hue is impacted.
            `factor=0.0` makes this layer perform a no-op operation,
            while a value of `1.0` performs the most aggressive contrast
            adjustment available. If a tuple is used, a `factor` is
            sampled between the two values for every image augmented.
            If a single float is used, a value between `0.0` and the
            passed float is sampled. In order to ensure the value is
            always the same, please pass a tuple with two identical
            floats: `(0.5, 0.5)`.
        seed: Integer. Used to create a random seed.
    """

    def __init__(
        self,
        value_range=(0, 255),
        brightness_factor=None,
        contrast_factor=None,
        saturation_factor=None,
        hue_factor=None,
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        # Raw factors are kept as passed, both for `get_config` and to
        # decide which sub-layers are active.
        self.value_range = value_range
        self.brightness_factor = brightness_factor
        self.contrast_factor = contrast_factor
        self.saturation_factor = saturation_factor
        self.hue_factor = hue_factor
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # Only instantiate the sub-layers whose factor was provided; the
        # others stay None and are skipped during transformation.
        self.random_brightness = None
        self.random_contrast = None
        self.random_saturation = None
        self.random_hue = None
        if self.brightness_factor is not None:
            self.random_brightness = random_brightness.RandomBrightness(
                factor=self.brightness_factor,
                value_range=self.value_range,
                seed=self.seed,
            )
        if self.contrast_factor is not None:
            self.random_contrast = random_contrast.RandomContrast(
                factor=self.contrast_factor,
                value_range=self.value_range,
                seed=self.seed,
            )
        if self.saturation_factor is not None:
            self.random_saturation = random_saturation.RandomSaturation(
                factor=self.saturation_factor,
                value_range=self.value_range,
                seed=self.seed,
            )
        if self.hue_factor is not None:
            self.random_hue = random_hue.RandomHue(
                factor=self.hue_factor,
                value_range=self.value_range,
                seed=self.seed,
            )

    def build(self, input_shape):
        # Propagate build to whichever sub-layers exist.
        if self.brightness_factor is not None:
            self.random_brightness.build(input_shape)
        if self.contrast_factor is not None:
            self.random_contrast.build(input_shape)
        if self.saturation_factor is not None:
            self.random_saturation.build(input_shape)
        if self.hue_factor is not None:
            self.random_hue.build(input_shape)

    def transform_images(self, images, transformation, training=True):
        """Apply the active sub-layers sequentially.

        Order is fixed: brightness -> contrast -> saturation -> hue.
        The incoming `transformation` argument is ignored; each sub-layer
        samples its own transformation here (the local name is reused).
        """
        if training:
            # When tracing inside tf.data, force the tensorflow backend
            # on this layer and on each sub-layer before use.
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")
            images = self.backend.cast(images, self.compute_dtype)
            if self.brightness_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_brightness.backend.set_backend("tensorflow")
                transformation = (
                    self.random_brightness.get_random_transformation(
                        images,
                        seed=self._get_seed_generator(self.backend._backend),
                    )
                )
                images = self.random_brightness.transform_images(
                    images, transformation
                )
            if self.contrast_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_contrast.backend.set_backend("tensorflow")
                transformation = self.random_contrast.get_random_transformation(
                    images, seed=self._get_seed_generator(self.backend._backend)
                )
                # NOTE(review): the sampled factor is cast here so it
                # matches this layer's compute dtype — presumably needed
                # when the sub-layer's dtype differs; confirm.
                transformation["contrast_factor"] = self.backend.cast(
                    transformation["contrast_factor"], dtype=self.compute_dtype
                )
                images = self.random_contrast.transform_images(
                    images, transformation
                )
            if self.saturation_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_saturation.backend.set_backend("tensorflow")
                transformation = (
                    self.random_saturation.get_random_transformation(
                        images,
                        seed=self._get_seed_generator(self.backend._backend),
                    )
                )
                images = self.random_saturation.transform_images(
                    images, transformation
                )
            if self.hue_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_hue.backend.set_backend("tensorflow")
                transformation = self.random_hue.get_random_transformation(
                    images, seed=self._get_seed_generator(self.backend._backend)
                )
                images = self.random_hue.transform_images(
                    images, transformation
                )
            images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Color jitter changes pixel values only; labels pass through.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        # Color jitter is geometry-preserving; boxes pass through.
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are categorical; color jitter does not apply.
        return segmentation_masks

    def compute_output_shape(self, input_shape):
        # All sub-layers are pixel-wise: shape is unchanged.
        return input_shape

    def get_config(self):
        """Return the serializable layer configuration."""
        config = {
            "value_range": self.value_range,
            "brightness_factor": self.brightness_factor,
            "contrast_factor": self.contrast_factor,
            "saturation_factor": self.saturation_factor,
            "hue_factor": self.hue_factor,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/equalization_test.py | keras/src/layers/preprocessing/image_preprocessing/equalization_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import ops
from keras.src import testing
class EqualizationTest(testing.TestCase):
    """Tests for the `Equalization` preprocessing layer."""

    def assertAllInRange(self, array, min_val, max_val):
        # Helper: every element must lie in [min_val, max_val].
        self.assertTrue(np.all(array >= min_val))
        self.assertTrue(np.all(array <= max_val))

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        # Standard layer contract checks for both data formats.
        self.run_layer_test(
            layers.Equalization,
            init_kwargs={
                "value_range": (0, 255),
                "data_format": "channels_last",
            },
            input_shape=(1, 2, 2, 3),
            supports_masking=False,
            expected_output_shape=(1, 2, 2, 3),
        )
        self.run_layer_test(
            layers.Equalization,
            init_kwargs={
                "value_range": (0, 255),
                "data_format": "channels_first",
            },
            input_shape=(1, 3, 2, 2),
            supports_masking=False,
            expected_output_shape=(1, 3, 2, 2),
        )

    def test_equalizes_to_all_bins(self):
        # With a large uniform input, equalization should touch every
        # one of the 256 output intensity levels.
        xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
            np.float32
        )
        layer = layers.Equalization(value_range=(0, 255))
        xs = layer(xs)
        for i in range(0, 256):
            self.assertTrue(np.any(ops.convert_to_numpy(xs) == i))

    @parameterized.named_parameters(
        ("float32", np.float32), ("int32", np.int32), ("int64", np.int64)
    )
    def test_input_dtypes(self, dtype):
        # Float and integer inputs should all equalize across all bins
        # and stay within the declared value range.
        xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
            dtype
        )
        layer = layers.Equalization(value_range=(0, 255))
        xs = ops.convert_to_numpy(layer(xs))
        for i in range(0, 256):
            self.assertTrue(np.any(xs == i))
        self.assertAllInRange(xs, 0, 255)

    @parameterized.named_parameters(("0_255", 0, 255), ("0_1", 0, 1))
    def test_output_range(self, lower, upper):
        # Output must stay inside the configured value_range.
        xs = np.random.uniform(
            size=(2, 512, 512, 3), low=lower, high=upper
        ).astype(np.float32)
        layer = layers.Equalization(value_range=(lower, upper))
        xs = ops.convert_to_numpy(layer(xs))
        self.assertAllInRange(xs, lower, upper)

    def test_constant_regions(self):
        # Three flat intensity bands should remain distinguishable
        # (at least three distinct output values) after equalization.
        xs = np.zeros((1, 64, 64, 3), dtype=np.float32)
        xs[:, :21, :, :] = 50
        xs[:, 21:42, :, :] = 100
        xs[:, 42:, :, :] = 200
        layer = layers.Equalization(value_range=(0, 255))
        equalized = ops.convert_to_numpy(layer(xs))
        self.assertTrue(len(np.unique(equalized)) >= 3)
        self.assertAllInRange(equalized, 0, 255)

    def test_grayscale_images(self):
        # Single-channel input in both data formats keeps its channel
        # dimension and valid range.
        xs_last = np.random.uniform(0, 255, size=(2, 64, 64, 1)).astype(
            np.float32
        )
        layer_last = layers.Equalization(
            value_range=(0, 255), data_format="channels_last"
        )
        equalized_last = ops.convert_to_numpy(layer_last(xs_last))
        self.assertEqual(equalized_last.shape[-1], 1)
        self.assertAllInRange(equalized_last, 0, 255)
        xs_first = np.random.uniform(0, 255, size=(2, 1, 64, 64)).astype(
            np.float32
        )
        layer_first = layers.Equalization(
            value_range=(0, 255), data_format="channels_first"
        )
        equalized_first = ops.convert_to_numpy(layer_first(xs_first))
        self.assertEqual(equalized_first.shape[1], 1)
        self.assertAllInRange(equalized_first, 0, 255)

    def test_single_color_image(self):
        # A constant image has a degenerate histogram; equalization
        # should leave it unchanged.
        xs_last = np.full((1, 64, 64, 3), 128, dtype=np.float32)
        layer_last = layers.Equalization(
            value_range=(0, 255), data_format="channels_last"
        )
        equalized_last = ops.convert_to_numpy(layer_last(xs_last))
        self.assertAllClose(equalized_last, 128.0)
        xs_first = np.full((1, 3, 64, 64), 128, dtype=np.float32)
        layer_first = layers.Equalization(
            value_range=(0, 255), data_format="channels_first"
        )
        equalized_first = ops.convert_to_numpy(layer_first(xs_first))
        self.assertAllClose(equalized_first, 128.0)

    def test_different_bin_sizes(self):
        # Any bin count should produce output within the value range.
        xs = np.random.uniform(0, 255, size=(1, 64, 64, 3)).astype(np.float32)
        bin_sizes = [16, 64, 128, 256]
        for bins in bin_sizes:
            layer = layers.Equalization(value_range=(0, 255), bins=bins)
            equalized = ops.convert_to_numpy(layer(xs))
            self.assertAllInRange(equalized, 0, 255)

    def test_tf_data_compatibility(self):
        # The layer must be usable inside a tf.data map().
        layer = layers.Equalization(value_range=(0, 255))
        input_data = np.random.random((2, 8, 8, 3)) * 255
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output_array = output.numpy()
            self.assertAllInRange(output_array, 0, 255)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py | keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py | import math
from keras.src.backend import config as backend_config
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import ( # noqa: E501
densify_bounding_boxes,
)
class BaseImagePreprocessingLayer(DataLayer):
    """Base class for image preprocessing/augmentation layers.

    Subclasses override the `transform_*` hooks. `call` samples a single
    random transformation per invocation and applies it consistently to
    images and, when present, labels, bounding boxes and segmentation
    masks. Both single images (rank 3) and batches (rank 4) are
    supported; single inputs are expanded to a batch of one and squeezed
    back afterwards.
    """

    # When True, the constructor accepts a `factor` argument and
    # normalizes it via `_set_factor`; subclasses that handle their own
    # factor semantics set this to False.
    _USE_BASE_FACTOR = True
    # Inclusive (lower, upper) bounds that `factor` is validated against.
    _FACTOR_BOUNDS = (-1, 1)

    def __init__(
        self, factor=None, bounding_box_format=None, data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.bounding_box_format = bounding_box_format
        self.data_format = backend_config.standardize_data_format(data_format)
        if self._USE_BASE_FACTOR:
            # Note: `factor or 0.0` also maps an explicit 0 to 0.0.
            factor = factor or 0.0
            self._set_factor(factor)
        elif factor is not None:
            raise ValueError(
                f"Layer {self.__class__.__name__} does not take "
                f"a `factor` argument. Received: factor={factor}"
            )

    def _set_factor(self, factor):
        """Validate `factor` and store it as an ordered `(lower, upper)` pair."""
        error_msg = (
            "The `factor` argument should be a number "
            "(or a list of two numbers) "
            "in the range "
            f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
            f"Received: factor={factor}"
        )
        if isinstance(factor, (tuple, list)):
            if len(factor) != 2:
                raise ValueError(error_msg)
            # Only pairs lying entirely outside the bounds are rejected.
            if (
                factor[0] > self._FACTOR_BOUNDS[1]
                or factor[1] < self._FACTOR_BOUNDS[0]
            ):
                raise ValueError(error_msg)
            lower, upper = sorted(factor)
        elif isinstance(factor, (int, float)):
            if (
                factor < self._FACTOR_BOUNDS[0]
                or factor > self._FACTOR_BOUNDS[1]
            ):
                raise ValueError(error_msg)
            # A scalar f becomes the symmetric range
            # (max(-|f|, lower_bound), |f|).
            factor = abs(factor)
            lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
        else:
            raise ValueError(error_msg)
        self.factor = lower, upper

    def get_random_transformation(self, data, training=True, seed=None):
        # Default: no randomness. Subclasses return a dict of sampled
        # parameters that the `transform_*` hooks consume.
        return None

    def transform_images(self, images, transformation, training=True):
        # Required hook: every subclass must transform images.
        raise NotImplementedError()

    def transform_labels(self, labels, transformation, training=True):
        raise NotImplementedError()

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        raise NotImplementedError()

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        raise NotImplementedError()

    def transform_single_image(self, image, transformation, training=True):
        # Wrap into a batch of one, reuse the batched implementation,
        # then drop the batch dimension again.
        images = self.backend.numpy.expand_dims(image, axis=0)
        outputs = self.transform_images(
            images, transformation=transformation, training=training
        )
        return self.backend.numpy.squeeze(outputs, axis=0)

    def transform_single_label(self, label, transformation, training=True):
        # Same batch-of-one trick as `transform_single_image`.
        labels = self.backend.numpy.expand_dims(label, axis=0)
        outputs = self.transform_labels(
            labels, transformation=transformation, training=training
        )
        return self.backend.numpy.squeeze(outputs, axis=0)

    def transform_single_bounding_box(
        self,
        bounding_box,
        transformation,
        training=True,
    ):
        # Bounding boxes are dicts ("boxes"/"labels"), so expanding and
        # squeezing is done per-key by the format helpers below.
        bounding_boxes = self._format_single_input_bounding_box(bounding_box)
        outputs = self.transform_bounding_boxes(
            bounding_boxes,
            transformation=transformation,
            training=training,
        )
        bounding_box = self._format_single_output_bounding_box(outputs)
        return bounding_box

    def transform_single_segmentation_mask(
        self, segmentation_mask, transformation, training=True
    ):
        # Same batch-of-one trick as `transform_single_image`.
        segmentation_masks = self.backend.numpy.expand_dims(
            segmentation_mask, axis=0
        )
        outputs = self.transform_segmentation_masks(
            segmentation_masks, transformation=transformation, training=training
        )
        return self.backend.numpy.squeeze(outputs, axis=0)

    def _is_batched(self, maybe_image_batch):
        """Return True for rank-4 input, False for rank 3, else raise."""
        shape = self.backend.core.shape(maybe_image_batch)
        if len(shape) == 3:
            return False
        if len(shape) == 4:
            return True
        raise ValueError(
            "Expected image tensor to have rank 3 (single image) "
            f"or 4 (batch of images). Received: data.shape={shape}"
        )

    def call(self, data, training=True):
        """Apply one sampled transformation to all components of `data`.

        `data` is either a plain image tensor or a dict with an "images"
        key and optional "bounding_boxes", "labels" and
        "segmentation_masks" keys. The same transformation (sampled once
        here) is used for every component so they stay aligned.
        Note: when `data` is a dict, it is mutated in place.
        """
        transformation = self.get_random_transformation(data, training=training)
        if isinstance(data, dict):
            is_batched = self._is_batched(data["images"])
            if is_batched:
                data["images"] = self.transform_images(
                    self.backend.convert_to_tensor(data["images"]),
                    transformation=transformation,
                    training=training,
                )
            else:
                data["images"] = self.transform_single_image(
                    self.backend.convert_to_tensor(data["images"]),
                    transformation=transformation,
                    training=training,
                )
            if "bounding_boxes" in data:
                if not self.bounding_box_format:
                    raise ValueError(
                        "You passed an input with a 'bounding_boxes' key, "
                        "but you didn't specify a bounding box format. "
                        "Pass a `bounding_box_format` argument to your "
                        f"{self.__class__.__name__} layer, e.g. "
                        "`bounding_box_format='xyxy'`."
                    )
                # Ragged/sparse boxes are densified before transforming.
                bounding_boxes = densify_bounding_boxes(
                    data["bounding_boxes"],
                    is_batched=is_batched,
                    backend=self.backend,
                )
                if is_batched:
                    data["bounding_boxes"] = self.transform_bounding_boxes(
                        bounding_boxes,
                        transformation=transformation,
                        training=training,
                    )
                else:
                    data["bounding_boxes"] = self.transform_single_bounding_box(
                        bounding_boxes,
                        transformation=transformation,
                        training=training,
                    )
            if "labels" in data:
                if is_batched:
                    data["labels"] = self.transform_labels(
                        self.backend.convert_to_tensor(data["labels"]),
                        transformation=transformation,
                        training=training,
                    )
                else:
                    data["labels"] = self.transform_single_label(
                        self.backend.convert_to_tensor(data["labels"]),
                        transformation=transformation,
                        training=training,
                    )
            if "segmentation_masks" in data:
                if is_batched:
                    data["segmentation_masks"] = (
                        self.transform_segmentation_masks(
                            data["segmentation_masks"],
                            transformation=transformation,
                            training=training,
                        )
                    )
                else:
                    data["segmentation_masks"] = (
                        self.transform_single_segmentation_mask(
                            data["segmentation_masks"],
                            transformation=transformation,
                            training=training,
                        )
                    )
            return data
        # `data` is just images.
        if self._is_batched(data):
            return self.transform_images(
                self.backend.convert_to_tensor(data),
                transformation=transformation,
                training=training,
            )
        return self.transform_single_image(
            self.backend.convert_to_tensor(data),
            transformation=transformation,
            training=training,
        )

    def _format_single_input_bounding_box(self, bounding_box):
        # Add a leading batch dimension to the "boxes"/"labels" entries
        # of a single (unbatched) bounding box dict. Mutates in place.
        for key in bounding_box:
            if key == "labels":
                bounding_box[key] = self.backend.numpy.expand_dims(
                    bounding_box[key], axis=0
                )
            if key == "boxes":
                bounding_box[key] = self.backend.numpy.expand_dims(
                    bounding_box[key], axis=0
                )
        return bounding_box

    def _format_single_output_bounding_box(self, bounding_boxes):
        # Inverse of `_format_single_input_bounding_box`: drop the batch
        # dimension again. Mutates in place.
        for key in bounding_boxes:
            if key == "labels":
                bounding_boxes[key] = self.backend.numpy.squeeze(
                    bounding_boxes[key], axis=0
                )
            if key == "boxes":
                bounding_boxes[key] = self.backend.numpy.squeeze(
                    bounding_boxes[key], axis=0
                )
        return bounding_boxes

    def get_config(self):
        """Return the serializable layer configuration."""
        config = super().get_config()
        # Only serialized when set, to keep configs minimal.
        if self.bounding_box_format is not None:
            config.update(
                {
                    "bounding_box_format": self.bounding_box_format,
                }
            )
        return config

    def _transform_value_range(
        self, images, original_range, target_range, dtype="float32"
    ):
        """Convert input values from `original_range` to `target_range`.

        This function is intended to be used in preprocessing layers that
        rely upon color values. This allows us to assume internally that
        the input tensor is always in the range `(0, 255)`.

        Args:
            images: the set of images to transform to the target range.
            original_range: the value range to transform from.
            target_range: the value range to transform to.
            dtype: the dtype to compute the conversion with,
                defaults to "float32".

        Returns:
            a new Tensor with values in the target range.

        Example:

        ```python
        original_range = [0, 1]
        target_range = [0, 255]
        images = layer.preprocessing.transform_value_range(
            images,
            original_range,
            target_range
        )
        images = ops.minimum(images + 10, 255)
        images = layer.preprocessing.transform_value_range(
            images,
            target_range,
            original_range
        )
        ```
        """
        # Fast path: identical ranges need no work (and no cast).
        if (
            original_range[0] == target_range[0]
            and original_range[1] == target_range[1]
        ):
            return images
        images = self.backend.cast(images, dtype=dtype)
        original_min_value, original_max_value = self._unwrap_value_range(
            original_range, dtype=dtype
        )
        target_min_value, target_max_value = self._unwrap_value_range(
            target_range, dtype=dtype
        )
        # images in the [0, 1] scale
        images = (images - original_min_value) / (
            original_max_value - original_min_value
        )
        scale_factor = target_max_value - target_min_value
        return (images * scale_factor) + target_min_value

    def _unwrap_value_range(self, value_range, dtype="float32"):
        # Split a (min, max) pair and cast both ends to `dtype`.
        min_value, max_value = value_range
        min_value = self.backend.cast(min_value, dtype=dtype)
        max_value = self.backend.cast(max_value, dtype=dtype)
        return min_value, max_value

    def _compute_affine_matrix(
        self,
        center_x,
        center_y,
        angle,
        translate_x,
        translate_y,
        scale,
        shear_x,
        shear_y,
        height,
        width,
    ):
        """Build per-image affine matrices (flattened, 8 coefficients each).

        # Scaling          Shear           Rotation
        # [sx  0   0]    [1   shx  0]   [cos(θ)  -sin(θ)  0]
        # M = [0   sy  0] * [shy  1   0] * [sin(θ)  cos(θ)   0]
        #     [0   0   1]   [0    0   1]   [0       0        1]

        # a0 = sx * (cos(θ) + shx * sin(θ))
        # a1 = sx * (-sin(θ) + shx * cos(θ))
        # a2 = tx + cx - cx * a0 - cy * a1
        # b0 = sy * (shy * cos(θ) + sin(θ))
        # b1 = sy * (shy * -sin(θ) + cos(θ))
        # b2 = ty + cy - cx * b0 - cy * b1
        """
        ops = self.backend
        # Angles and shears are given in degrees; convert to radians.
        degree_to_radian_factor = ops.convert_to_tensor(math.pi / 180.0)
        angle = angle * degree_to_radian_factor
        shear_x = shear_x * degree_to_radian_factor
        shear_y = shear_y * degree_to_radian_factor
        batch_size = ops.shape(angle)[0]
        dtype = angle.dtype
        width = ops.cast(width, dtype)
        height = ops.cast(height, dtype)
        # Normalized centers (0..1) are scaled to pixel coordinates.
        cx = center_x * (width - 1)
        cy = center_y * (height - 1)
        cos_theta = ops.numpy.cos(angle)
        sin_theta = ops.numpy.sin(angle)
        shear_x = ops.numpy.tan(shear_x)
        shear_y = ops.numpy.tan(shear_y)
        a0 = scale * (cos_theta + shear_x * sin_theta)
        a1 = scale * (-sin_theta + shear_x * cos_theta)
        a2 = translate_x + cx - cx * a0 - cy * a1
        b0 = scale * (shear_y * cos_theta + sin_theta)
        b1 = scale * (shear_y * -sin_theta + cos_theta)
        b2 = translate_y + cy - cx * b0 - cy * b1
        # Last two coefficients (projective terms) are zero.
        affine_matrix = ops.numpy.concatenate(
            [
                a0[:, None],
                a1[:, None],
                a2[:, None],
                b0[:, None],
                b1[:, None],
                b2[:, None],
                ops.numpy.zeros((batch_size, 2)),
            ],
            axis=1,
        )
        return affine_matrix
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/aug_mix.py | keras/src/layers/preprocessing/image_preprocessing/aug_mix.py | import random as py_random
import keras.src.layers as layers
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
from keras.src.utils import backend_utils
AUGMENT_LAYERS_ALL = [
"random_shear",
"random_translation",
"random_rotation",
"random_posterization",
"solarization",
"auto_contrast",
"equalization",
"random_brightness",
"random_color_degeneration",
"random_contrast",
"random_sharpness",
]
AUGMENT_LAYERS = [
"random_shear",
"random_translation",
"random_rotation",
"random_posterization",
"solarization",
"auto_contrast",
"equalization",
]
@keras_export("keras.layers.AugMix")
class AugMix(BaseImagePreprocessingLayer):
"""Performs the AugMix data augmentation technique.
AugMix aims to produce images with variety while preserving the image
semantics and local statistics. During the augmentation process,
the same augmentation is applied across all images in the batch
in num_chains different ways, with each chain consisting of
chain_depth augmentations.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
References:
- [AugMix paper](https://arxiv.org/pdf/1912.02781)
- [Official Code](https://github.com/google-research/augmix)
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written (low, high).
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
num_chains: an integer representing the number of different chains to
be mixed, defaults to 3.
chain_depth: an integer representing the maximum number of
transformations to be applied in each chain. The actual number
of transformations in each chain will be sampled randomly
from the range `[0, `chain_depth`]`. Defaults to 3.
factor: The strength of the augmentation as a normalized value
between 0 and 1. Default is 0.3.
alpha: a float value used as the probability coefficients for the
Beta and Dirichlet distributions, defaults to 1.0.
all_ops: Use all operations (including random_brightness,
random_color_degeneration, random_contrast and random_sharpness).
Default is True.
interpolation: The interpolation method to use for resizing operations.
Options include `"nearest"`, `"bilinear"`. Default is `"bilinear"`.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
def __init__(
self,
value_range=(0, 255),
num_chains=3,
chain_depth=3,
factor=0.3,
alpha=1.0,
all_ops=True,
interpolation="bilinear",
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self.value_range = value_range
self.num_chains = num_chains
self.chain_depth = chain_depth
self._set_factor(factor)
self.alpha = alpha
self.all_ops = all_ops
self.interpolation = interpolation
self.seed = seed
self.generator = SeedGenerator(seed)
if self.all_ops:
self._augment_layers = AUGMENT_LAYERS_ALL
else:
self._augment_layers = AUGMENT_LAYERS
self.random_shear = layers.RandomShear(
x_factor=self.factor,
y_factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_translation = layers.RandomTranslation(
height_factor=self.factor,
width_factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_rotation = layers.RandomRotation(
factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.solarization = layers.Solarization(
addition_factor=self.factor,
threshold_factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_posterization = layers.RandomPosterization(
factor=max(1, int(8 * self.factor[1])),
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.auto_contrast = layers.AutoContrast(
value_range=self.value_range, data_format=data_format, **kwargs
)
self.equalization = layers.Equalization(
value_range=self.value_range, data_format=data_format, **kwargs
)
if self.all_ops:
self.random_brightness = layers.RandomBrightness(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_color_degeneration = layers.RandomColorDegeneration(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_contrast = layers.RandomContrast(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_sharpness = layers.RandomSharpness(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
def build(self, input_shape):
for layer_name in self._augment_layers:
augmentation_layer = getattr(self, layer_name)
augmentation_layer.build(input_shape)
def _sample_from_dirichlet(self, shape, alpha, seed):
gamma_sample = self.backend.random.gamma(
shape=shape,
alpha=alpha,
seed=seed,
)
return gamma_sample / self.backend.numpy.sum(
gamma_sample, axis=-1, keepdims=True
)
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if backend_utils.in_tf_graph():
self.backend.set_backend("tensorflow")
for layer_name in self._augment_layers:
augmentation_layer = getattr(self, layer_name)
augmentation_layer.backend.set_backend("tensorflow")
seed = seed or self._get_seed_generator(self.backend._backend)
chain_mixing_weights = self._sample_from_dirichlet(
[self.num_chains], self.alpha, seed
)
weight_sample = self.backend.random.beta(
shape=(),
alpha=self.alpha,
beta=self.alpha,
seed=seed,
)
chain_transforms = []
for _ in range(self.num_chains):
depth_transforms = []
for _ in range(self.chain_depth):
layer_name = py_random.choice(self._augment_layers + [None])
if layer_name is None:
continue
augmentation_layer = getattr(self, layer_name)
depth_transforms.append(
{
"layer_name": layer_name,
"transformation": (
augmentation_layer.get_random_transformation(
data,
seed=self._get_seed_generator(
self.backend._backend
),
)
),
}
)
chain_transforms.append(depth_transforms)
transformation = {
"chain_mixing_weights": chain_mixing_weights,
"weight_sample": weight_sample,
"chain_transforms": chain_transforms,
}
return transformation
def transform_images(self, images, transformation, training=True):
if training:
images = self.backend.cast(images, self.compute_dtype)
chain_mixing_weights = self.backend.cast(
transformation["chain_mixing_weights"], dtype=self.compute_dtype
)
weight_sample = self.backend.cast(
transformation["weight_sample"], dtype=self.compute_dtype
)
chain_transforms = transformation["chain_transforms"]
aug_images = self.backend.numpy.zeros_like(images)
for idx, chain_transform in enumerate(chain_transforms):
copied_images = self.backend.numpy.copy(images)
for depth_transform in chain_transform:
layer_name = depth_transform["layer_name"]
layer_transform = depth_transform["transformation"]
augmentation_layer = getattr(self, layer_name)
copied_images = augmentation_layer.transform_images(
copied_images, layer_transform
)
aug_images += copied_images * chain_mixing_weights[idx]
images = weight_sample * images + (1 - weight_sample) * aug_images
images = self.backend.numpy.clip(
images, self.value_range[0], self.value_range[1]
)
images = self.backend.cast(images, self.compute_dtype)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"value_range": self.value_range,
"num_chains": self.chain_depth,
"chain_depth": self.num_chains,
"factor": self.factor,
"alpha": self.alpha,
"all_ops": self.all_ops,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_hue.py | keras/src/layers/preprocessing/image_preprocessing/random_hue.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomHue")
class RandomHue(BaseImagePreprocessingLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of `1.0` performs the most aggressive
contrast adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
Example:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_hue = keras.layers.RandomHue(factor=0.5, value_range=[0, 1])
images = keras.ops.cast(images, "float32")
augmented_images_batch = random_hue(images[:8])
```
"""
_USE_BASE_FACTOR = True
_FACTOR_BOUNDS = (0, 1)
def __init__(
self,
factor,
value_range=(0, 255),
data_format=None,
seed=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.value_range = value_range
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
batch_size = 1
elif rank == 4:
batch_size = images_shape[0]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
invert = self.backend.random.uniform((batch_size,), seed=seed)
invert = self.backend.numpy.where(
invert > 0.5,
-self.backend.numpy.ones_like(invert),
self.backend.numpy.ones_like(invert),
)
factor = self.backend.random.uniform(
(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
return {"factor": invert * factor * 0.5}
def transform_images(self, images, transformation=None, training=True):
def _apply_random_hue(images, transformation):
images = self.backend.cast(images, self.compute_dtype)
images = self._transform_value_range(
images, self.value_range, (0, 1)
)
adjust_factors = transformation["factor"]
adjust_factors = self.backend.cast(adjust_factors, images.dtype)
adjust_factors = self.backend.numpy.expand_dims(adjust_factors, -1)
adjust_factors = self.backend.numpy.expand_dims(adjust_factors, -1)
images = self.backend.image.rgb_to_hsv(
images, data_format=self.data_format
)
if self.data_format == "channels_first":
h_channel = images[:, 0, :, :] + adjust_factors
h_channel = self.backend.numpy.where(
h_channel > 1.0, h_channel - 1.0, h_channel
)
h_channel = self.backend.numpy.where(
h_channel < 0.0, h_channel + 1.0, h_channel
)
images = self.backend.numpy.stack(
[h_channel, images[:, 1, :, :], images[:, 2, :, :]], axis=1
)
else:
h_channel = images[..., 0] + adjust_factors
h_channel = self.backend.numpy.where(
h_channel > 1.0, h_channel - 1.0, h_channel
)
h_channel = self.backend.numpy.where(
h_channel < 0.0, h_channel + 1.0, h_channel
)
images = self.backend.numpy.stack(
[h_channel, images[..., 1], images[..., 2]], axis=-1
)
images = self.backend.image.hsv_to_rgb(
images, data_format=self.data_format
)
images = self.backend.numpy.clip(images, 0, 1)
images = self._transform_value_range(
images, (0, 1), self.value_range
)
images = self.backend.cast(images, self.compute_dtype)
return images
if training:
images = _apply_random_hue(images, transformation)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_color_jitter_test.py | keras/src/layers/preprocessing/image_preprocessing/random_color_jitter_test.py | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomColorJitterTest(testing.TestCase):
    """Unit tests for `layers.RandomColorJitter`."""

    def _format_aware_batch(self, shape_last, shape_first):
        # Build a random image batch laid out for the active data format.
        if backend.config.image_data_format() == "channels_last":
            return np.random.random(shape_last)
        return np.random.random(shape_first)

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomColorJitter,
            init_kwargs={
                "value_range": (20, 200),
                "brightness_factor": 0.2,
                "contrast_factor": 0.2,
                "saturation_factor": 0.2,
                "hue_factor": 0.2,
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
        )

    def test_random_color_jitter_inference(self):
        # With training=False the layer must act as the identity.
        seed = 3481
        layer = layers.RandomColorJitter(
            value_range=(0, 1),
            brightness_factor=0.1,
            contrast_factor=0.2,
            saturation_factor=0.9,
            hue_factor=0.1,
        )
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        self.assertAllClose(inputs, layer(inputs, training=False))

    def test_brightness_only(self):
        # Brightness-only jitter must match RandomBrightness exactly.
        seed = 2390
        np.random.seed(seed)
        images = self._format_aware_batch((12, 8, 16, 3), (12, 3, 8, 16))
        combined = layers.RandomColorJitter(
            brightness_factor=[0.5, 0.5], seed=seed
        )
        reference = layers.RandomBrightness(factor=[0.5, 0.5], seed=seed)
        self.assertAllClose(
            backend.convert_to_numpy(combined(images)),
            backend.convert_to_numpy(reference(images)),
        )

    def test_saturation_only(self):
        # Saturation-only jitter must match RandomSaturation exactly.
        seed = 2390
        np.random.seed(seed)
        images = self._format_aware_batch((12, 8, 16, 3), (12, 3, 8, 16))
        combined = layers.RandomColorJitter(
            saturation_factor=[0.5, 0.5], seed=seed
        )
        reference = layers.RandomSaturation(factor=[0.5, 0.5], seed=seed)
        self.assertAllClose(combined(images), reference(images))

    def test_hue_only(self):
        # Hue-only jitter must match RandomHue exactly.
        seed = 2390
        np.random.seed(seed)
        images = self._format_aware_batch((12, 8, 16, 3), (12, 3, 8, 16))
        combined = layers.RandomColorJitter(hue_factor=[0.5, 0.5], seed=seed)
        reference = layers.RandomHue(factor=[0.5, 0.5], seed=seed)
        self.assertAllClose(combined(images), reference(images))

    def test_contrast_only(self):
        # Contrast-only jitter must match RandomContrast exactly.
        seed = 2390
        np.random.seed(seed)
        images = self._format_aware_batch((12, 8, 16, 3), (12, 3, 8, 16))
        combined = layers.RandomColorJitter(
            contrast_factor=[0.5, 0.5], seed=seed
        )
        reference = layers.RandomContrast(factor=[0.5, 0.5], seed=seed)
        self.assertAllClose(combined(images), reference(images))

    def test_tf_data_compatibility(self):
        # The layer must be usable as a tf.data map function.
        input_data = self._format_aware_batch((2, 8, 8, 3), (2, 3, 8, 8))
        layer = layers.RandomColorJitter(
            value_range=(0, 1),
            brightness_factor=0.1,
            contrast_factor=0.2,
            saturation_factor=0.9,
            hue_factor=0.1,
        )
        dataset = (
            tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        )
        for batch in dataset.take(1):
            batch.numpy()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/center_crop_test.py | keras/src/layers/preprocessing/image_preprocessing/center_crop_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CenterCropTest(testing.TestCase):
    """Tests for `layers.CenterCrop` across data formats and input ranks."""

    def np_center_crop(self, img, h_new, w_new, data_format="channels_last"):
        """NumPy reference implementation of a center crop.

        Handles both batched (rank-4) and unbatched (rank-3) inputs in
        either data format. Assumes the input is at least as large as the
        crop window.
        """
        img = np.array(img)
        if img.ndim == 4:
            if data_format == "channels_last":
                _, h, w = img.shape[:3]
            else:
                _, h, w = img.shape[1:]
        else:
            if data_format == "channels_last":
                h, w = img.shape[:2]
            else:
                h, w = img.shape[1:]
        # Top-left corner of the centered crop window.
        h_start = (h - h_new) // 2
        w_start = (w - w_new) // 2
        if data_format == "channels_last":
            return img[
                ..., h_start : h_start + h_new, w_start : w_start + w_new, :
            ]
        else:
            return img[
                ..., h_start : h_start + h_new, w_start : w_start + w_new
            ]

    @pytest.mark.requires_trainable_backend
    def test_center_crop_basics(self):
        # Smoke test: shapes, weight counts and masking in both formats.
        self.run_layer_test(
            layers.CenterCrop,
            init_kwargs={
                "height": 6,
                "width": 6,
                "data_format": "channels_last",
            },
            input_shape=(2, 12, 12, 3),
            expected_output_shape=(2, 6, 6, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        self.run_layer_test(
            layers.CenterCrop,
            init_kwargs={
                "height": 7,
                "width": 7,
                "data_format": "channels_first",
            },
            input_shape=(2, 3, 13, 13),
            expected_output_shape=(2, 3, 7, 7),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    @parameterized.parameters(
        [
            ((5, 7), "channels_first"),
            ((5, 7), "channels_last"),
            ((4, 9), "channels_first"),
            ((9, 4), "channels_last"),
        ]
    )
    def test_center_crop_correctness(self, size, data_format):
        # Compare the layer against the NumPy reference implementation.
        # batched case
        if data_format == "channels_first":
            img = np.random.random((2, 3, 9, 11))
        else:
            img = np.random.random((2, 9, 11, 3))
        out = layers.CenterCrop(
            size[0],
            size[1],
            data_format=data_format,
        )(img)
        if data_format == "channels_first":
            # Reference only supports channels_last; transpose around it.
            img_transpose = np.transpose(img, (0, 2, 3, 1))
            ref_out = np.transpose(
                self.np_center_crop(img_transpose, size[0], size[1]),
                (0, 3, 1, 2),
            )
        else:
            ref_out = self.np_center_crop(img, size[0], size[1])
        self.assertAllClose(ref_out, out)

        # unbatched case
        if data_format == "channels_first":
            img = np.random.random((3, 9, 11))
        else:
            img = np.random.random((9, 11, 3))
        out = layers.CenterCrop(
            size[0],
            size[1],
            data_format=data_format,
        )(img)
        if data_format == "channels_first":
            img_transpose = np.transpose(img, (1, 2, 0))
            ref_out = np.transpose(
                self.np_center_crop(
                    img_transpose,
                    size[0],
                    size[1],
                ),
                (2, 0, 1),
            )
        else:
            ref_out = self.np_center_crop(
                img,
                size[0],
                size[1],
            )
        self.assertAllClose(ref_out, out)

    @parameterized.parameters(
        [
            ((15, 10), "channels_first"),
            ((10, 17), "channels_last"),
        ]
    )
    def test_input_smaller_than_crop_box(self, size, data_format):
        """Output should equal resizing with crop_to_aspect ratio."""
        # batched case
        if data_format == "channels_first":
            img = np.random.random((2, 3, 9, 11))
        else:
            img = np.random.random((2, 9, 11, 3))
        out = layers.CenterCrop(
            size[0],
            size[1],
            data_format=data_format,
        )(img)
        ref_out = layers.Resizing(
            size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
        )(img)
        self.assertAllClose(ref_out, out)

        # unbatched case
        if data_format == "channels_first":
            img = np.random.random((3, 9, 11))
        else:
            img = np.random.random((9, 11, 3))
        out = layers.CenterCrop(
            size[0],
            size[1],
            data_format=data_format,
        )(img)
        ref_out = layers.Resizing(
            size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
        )(img)
        self.assertAllClose(ref_out, out)

    def test_tf_data_compatibility(self):
        # The layer must be usable as a tf.data map function.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        layer = layers.CenterCrop(8, 9)
        input_data = np.random.random(input_shape)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertEqual(tuple(output.shape), output_shape)

    # TODO
    # def test_list_compatibility(self):
    #     if backend.config.image_data_format() == "channels_last":
    #         images = [
    #             np.random.rand(10, 10, 3),
    #             np.random.rand(10, 10, 3),
    #         ]
    #         output_shape = (2, 6, 5, 3)
    #     else:
    #         images = [
    #             np.random.rand(3, 10, 10),
    #             np.random.rand(3, 10, 10),
    #         ]
    #         output_shape = (2, 3, 6, 5)
    #     output = layers.CenterCrop(height=6, width=5)(images)
    #     ref_output = self.np_center_crop(
    #         images, 6, 5, data_format=backend.config.image_data_format()
    #     )
    #     self.assertEqual(tuple(output.shape), output_shape)
    #     self.assertAllClose(ref_output, output)

    @parameterized.parameters(
        [((5, 17), "channels_last"), ((5, 100), "channels_last")]
    )
    def test_image_stretch(self, size, data_format):
        # When the crop is wider than the input, behavior should match
        # Resizing with crop_to_aspect_ratio=True.
        img = np.random.rand(2, 11, 3, 9)
        out = layers.CenterCrop(
            size[0],
            size[1],
            data_format=data_format,
        )(img)
        ref_out = layers.Resizing(
            size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
        )(img)
        self.assertAllClose(ref_out, out)

    @parameterized.named_parameters(
        (
            "normal",
            5,
            5,
            [[1.0, 0.0, 3.0, 1.0], [5.0, 2.0, 5.0, 4.0]],
        ),
        (
            "with_stretch",
            20,
            20,
            [[5.0, 0.0, 10.0, 5.0], [15.0, 7.5, 20.0, 12.5]],
        ),
    )
    def test_center_crop_bounding_boxes(self, height, width, expected_boxes):
        # Boxes (xyxy) must be translated/scaled along with the crop.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),
            "labels": np.array([[1, 2]]),
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        center_crop_layer = layers.CenterCrop(
            height=height,
            width=width,
            bounding_box_format="xyxy",
        )
        output = center_crop_layer(input_data)
        self.assertAllClose(output["bounding_boxes"]["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "normal",
            5,
            5,
            [[1.0, 0.0, 3.0, 1.0], [5.0, 2.0, 5.0, 4.0]],
        ),
        (
            "with_stretch",
            20,
            20,
            [[5.0, 0.0, 10.0, 5.0], [15.0, 7.5, 20.0, 12.5]],
        ),
    )
    def test_center_crop_tf_data_bounding_boxes(
        self, height, width, expected_boxes
    ):
        # Same box expectations as above, but routed through tf.data.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),
            "labels": np.array([[1, 2]]),
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        center_crop_layer = layers.CenterCrop(
            height=height,
            width=width,
            bounding_box_format="xyxy",
        )
        ds = ds.map(center_crop_layer)
        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["bounding_boxes"]["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/resizing_test.py | keras/src/layers/preprocessing/image_preprocessing/resizing_test.py | import grain
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import Sequential
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.testing.test_utils import named_product
class ResizingTest(testing.TestCase):
    """Tests for `layers.Resizing` across backends and data formats."""

    @parameterized.named_parameters(
        named_product(
            interpolation=["nearest", "bilinear", "bicubic", "lanczos5"],
            crop_pad=[(False, False), (True, False), (False, True)],
            antialias=[False, True],
            data_format=["channels_last", "channels_first"],
        )
    )
    def test_resizing_basics(
        self,
        interpolation,
        crop_pad,
        antialias,
        data_format,
    ):
        # Smoke test over the full grid of layer configurations.
        if interpolation == "lanczos5" and backend.backend() == "torch":
            self.skipTest("Torch does not support lanczos.")
        crop_to_aspect_ratio, pad_to_aspect_ratio = crop_pad
        if data_format == "channels_last":
            input_shape = (2, 12, 12, 3)
            expected_output_shape = (2, 6, 6, 3)
        else:
            input_shape = (2, 3, 12, 12)
            expected_output_shape = (2, 3, 6, 6)
        self.run_layer_test(
            layers.Resizing,
            init_kwargs={
                "height": 6,
                "width": 6,
                "interpolation": interpolation,
                "crop_to_aspect_ratio": crop_to_aspect_ratio,
                "pad_to_aspect_ratio": pad_to_aspect_ratio,
                "antialias": antialias,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=expected_output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
        )

    @parameterized.parameters([("channels_first",), ("channels_last",)])
    def test_down_sampling_numeric(self, data_format):
        # Nearest-neighbor 4x4 -> 2x2 picks known source pixels.
        img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(np.float32)
        if data_format == "channels_first":
            img = img.transpose(0, 3, 1, 2)
        out = layers.Resizing(
            height=2, width=2, interpolation="nearest", data_format=data_format
        )(img)
        ref_out = (
            np.asarray([[5, 7], [13, 15]])
            .astype(np.float32)
            .reshape((1, 2, 2, 1))
        )
        if data_format == "channels_first":
            ref_out = ref_out.transpose(0, 3, 1, 2)
        self.assertAllClose(ref_out, out)

    @parameterized.parameters([("channels_first",), ("channels_last",)])
    def test_up_sampling_numeric(self, data_format):
        # Nearest-neighbor 2x2 -> 4x4 duplicates each source pixel.
        img = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(np.float32)
        if data_format == "channels_first":
            img = img.transpose(0, 3, 1, 2)
        out = layers.Resizing(
            height=4,
            width=4,
            interpolation="nearest",
            data_format=data_format,
        )(img)
        ref_out = (
            np.asarray([[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]])
            .astype(np.float32)
            .reshape((1, 4, 4, 1))
        )
        if data_format == "channels_first":
            ref_out = ref_out.transpose(0, 3, 1, 2)
        self.assertAllClose(ref_out, out)

    @parameterized.parameters([("channels_first",), ("channels_last",)])
    def test_crop_to_aspect_ratio(self, data_format):
        # With crop_to_aspect_ratio, the center columns are kept.
        img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype("float32")
        if data_format == "channels_first":
            img = img.transpose(0, 3, 1, 2)
        out = layers.Resizing(
            height=4,
            width=2,
            interpolation="nearest",
            data_format=data_format,
            crop_to_aspect_ratio=True,
        )(img)
        ref_out = (
            np.asarray(
                [
                    [1, 2],
                    [5, 6],
                    [9, 10],
                    [13, 14],
                ]
            )
            .astype("float32")
            .reshape((1, 4, 2, 1))
        )
        if data_format == "channels_first":
            ref_out = ref_out.transpose(0, 3, 1, 2)
        self.assertAllClose(ref_out, out)

    @parameterized.parameters([("channels_first",), ("channels_last",)])
    def test_unbatched_image(self, data_format):
        # Rank-3 (unbatched) inputs must be supported too.
        img = np.reshape(np.arange(0, 16), (4, 4, 1)).astype("float32")
        if data_format == "channels_first":
            img = img.transpose(2, 0, 1)
        out = layers.Resizing(
            2, 2, interpolation="nearest", data_format=data_format
        )(img)
        ref_out = (
            np.asarray(
                [
                    [5, 7],
                    [13, 15],
                ]
            )
            .astype("float32")
            .reshape((2, 2, 1))
        )
        if data_format == "channels_first":
            ref_out = ref_out.transpose(2, 0, 1)
        self.assertAllClose(ref_out, out)

    def test_tf_data_compatibility(self):
        # The layer must be usable as a tf.data map function.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        layer = layers.Resizing(8, 9)
        input_data = np.random.random(input_shape)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        output = next(iter(ds)).numpy()
        self.assertEqual(tuple(output.shape), output_shape)

    def test_grain_compatibility(self):
        # The layer must also work inside a grain pipeline, and its output
        # must live on CPU regardless of backend.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        layer = layers.Resizing(8, 9)
        input_data = np.random.random(input_shape)
        ds = (
            grain.MapDataset.source(input_data)
            .to_iter_dataset()
            .batch(2)
            .map(layer)
        )
        output = next(iter(ds))
        output_np = backend.convert_to_numpy(output)
        self.assertEqual(tuple(output_np.shape), output_shape)
        self.assertTrue(backend.is_tensor(output))

        # Ensure the device of the data is on CPU.
        if backend.backend() == "tensorflow":
            self.assertIn("CPU", str(output.device))
        elif backend.backend() == "jax":
            self.assertIn("CPU", str(output.device))
        elif backend.backend() == "torch":
            self.assertEqual("cpu", str(output.device))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Sequential + tf.data only works with TF backend",
    )
    def test_tf_data_compatibility_sequential(self):
        # Test compatibility when wrapping in a Sequential
        # https://github.com/keras-team/keras/issues/347
        if backend.config.image_data_format() == "channels_last":
            input_shape = (2, 10, 12, 3)
            output_shape = (2, 8, 9, 3)
        else:
            input_shape = (2, 3, 10, 12)
            output_shape = (2, 3, 8, 9)
        layer = layers.Resizing(8, 9)
        input_data = np.random.random(input_shape)
        ds = (
            tf_data.Dataset.from_tensor_slices(input_data)
            .batch(2)
            .map(Sequential([layer]))
        )
        output = next(iter(ds)).numpy()
        self.assertEqual(tuple(output.shape), output_shape)

    @parameterized.parameters(
        [((15, 10), "channels_last"), ((15, 100), "channels_last")]
    )
    def test_data_stretch(self, size, data_format):
        # Upscaling well past the input size should still produce the
        # requested spatial shape.
        img = np.random.rand(1, 1, 4, 4)
        output = layers.Resizing(
            size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
        )(img)
        self.assertEqual(output.shape, (1, *size, 4))

    @parameterized.named_parameters(
        (
            "with_pad_to_aspect_ratio",
            True,
            False,
            [[6.0, 2.0, 10.0, 6.0], [14.0, 8.0, 18.0, 12.0]],
        ),
        (
            "with_crop_to_aspect_ratio",
            False,
            True,
            [[5.0, 0.5, 10.0, 5.5], [15.0, 8.0, 20.0, 13.0]],
        ),
        (
            "boxes_stretch",
            False,
            False,
            [[5.0, 2.0, 10.0, 6.0], [15.0, 8.0, 20.0, 12.0]],
        ),
    )
    def test_resize_bounding_boxes(
        self, pad_to_aspect_ratio, crop_to_aspect_ratio, expected_boxes
    ):
        # Boxes (xyxy) must be rescaled to match each resize strategy.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (10, 8, 3)
        else:
            image_shape = (3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [2, 1, 4, 3],
                    [6, 4, 8, 6],
                ]
            ),  # Example boxes (normalized)
            "labels": np.array([[1, 2]]),  # Dummy labels
        }
        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        resizing_layer = layers.Resizing(
            height=20,
            width=20,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            bounding_box_format="xyxy",
        )
        output = resizing_layer(input_data)
        self.assertAllClose(output["bounding_boxes"]["boxes"], expected_boxes)

    @parameterized.named_parameters(
        (
            "with_pad_to_aspect_ratio",
            True,
            False,
            [[6.0, 2.0, 10.0, 6.0], [14.0, 8.0, 18.0, 12.0]],
        ),
        (
            "with_crop_to_aspect_ratio",
            False,
            True,
            [[5.0, 0.5, 10.0, 5.5], [15.0, 8.0, 20.0, 13.0]],
        ),
        (
            "boxes_stretch",
            False,
            False,
            [[5.0, 2.0, 10.0, 6.0], [15.0, 8.0, 20.0, 12.0]],
        ),
    )
    def test_resize_tf_data_bounding_boxes(
        self, pad_to_aspect_ratio, crop_to_aspect_ratio, expected_boxes
    ):
        # Same box expectations as above, but routed through tf.data.
        if backend.config.image_data_format() == "channels_last":
            image_shape = (1, 10, 8, 3)
        else:
            image_shape = (1, 3, 10, 8)
        input_image = np.random.random(image_shape)
        bounding_boxes = {
            "boxes": np.array(
                [
                    [
                        [2, 1, 4, 3],
                        [6, 4, 8, 6],
                    ]
                ]
            ),  # Example boxes (normalized)
            "labels": np.array([[1, 2]]),  # Dummy labels
        }

        input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
        ds = tf_data.Dataset.from_tensor_slices(input_data)
        resizing_layer = layers.Resizing(
            height=20,
            width=20,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            bounding_box_format="xyxy",
        )
        ds = ds.map(resizing_layer)
        output = next(iter(ds))
        expected_boxes = np.array(expected_boxes)
        self.assertAllClose(output["bounding_boxes"]["boxes"], expected_boxes)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/random_perspective.py | keras/src/layers/preprocessing/image_preprocessing/random_perspective.py | from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.RandomPerspective")
class RandomPerspective(BaseImagePreprocessingLayer):
    """A preprocessing layer that applies random perspective transformations.

    This layer distorts the perspective of input images by shifting their
    corner points, simulating a 3D-like transformation. The amount of
    distortion is controlled by the `factor` and `scale` parameters.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: A float or a tuple of two floats.
            Represents the probability of applying the perspective
            transformation to each image in the batch.
            - `factor=0.0` ensures no transformation is applied.
            - `factor=1.0` means the transformation is always applied.
            - If a tuple `(min, max)` is provided, a probability is randomly
              sampled between `min` and `max` for each image.
            - If a single float is given, the probability is sampled between
              `0.0` and the provided float.
            Default is 1.0.
        scale: A float defining the relative amount of perspective shift.
            Determines how much the image corners are displaced, affecting
            the intensity of the perspective effect.
        interpolation: Interpolation mode. Supported values: `"nearest"`,
            `"bilinear"`.
        fill_value: a float represents the value to be filled outside the
            boundaries when `fill_mode="constant"`.
        seed: Integer. Used to create a random seed.
    """

    # `factor` is interpreted as an application probability here, so the
    # base class's factor handling is bypassed.
    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)
    _SUPPORTED_INTERPOLATION = ("nearest", "bilinear")

    def __init__(
        self,
        factor=1.0,
        scale=1.0,
        interpolation="bilinear",
        fill_value=0.0,
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self.scale = scale
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)
        # This layer opts out of jit compilation.
        self.supports_jit = False
        # Validation happens after attribute assignment; invalid arguments
        # raise before the layer is ever called.
        if scale < 0.0 or scale > 1.0:
            raise ValueError(
                "The `scale` argument should be a number "
                "in the range "
                f"[0,1]. "
                f"Received: scale={scale}"
            )
        if interpolation not in self._SUPPORTED_INTERPOLATION:
            raise NotImplementedError(
                f"Unknown `interpolation` {interpolation}. Expected of one "
                f"{self._SUPPORTED_INTERPOLATION}."
            )
        # Resolve axis indices once according to the data format; negative
        # indices work for both batched and unbatched inputs.
        if self.data_format == "channels_first":
            self.height_axis = -2
            self.width_axis = -1
            self.channel_axis = -3
        else:
            self.height_axis = -3
            self.width_axis = -2
            self.channel_axis = -1

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample per-image perspective parameters.

        Returns `None` when not training, otherwise a dict with the
        per-image boolean apply mask, the source/destination corner points
        and the input shape.
        """
        if not training:
            return None
        # `data` is either a dict ({"images": ..., ...}) or a bare tensor.
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        # Rank 3 means a single unbatched image.
        unbatched = len(images_shape) == 3
        if unbatched:
            batch_size = 1
        else:
            batch_size = images_shape[0]
        height, width = (
            images.shape[self.height_axis],
            images.shape[self.width_axis],
        )
        # NOTE(review): `or` discards a falsy caller-supplied seed such as
        # 0 — confirm that is intended.
        seed = seed or self._get_seed_generator(self.backend._backend)
        # Per-image application probability drawn from [factor_min, factor_max].
        transformation_probability = self.backend.random.uniform(
            shape=(batch_size,),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        random_threshold = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0.0,
            maxval=1.0,
            seed=seed,
        )
        # Boolean mask: which images in the batch get warped.
        apply_perspective = random_threshold < transformation_probability
        # Relative (x, y) offsets for the four corners of each image,
        # drawn from [-scale/2, scale/2].
        perspective_factor = self.backend.random.uniform(
            shape=(batch_size, 4, 2),
            minval=-0.5 * self.scale,
            maxval=0.5 * self.scale,
            seed=seed,
            dtype=self.compute_dtype,
        )
        # The four image corners in (x, y) order: top-left, top-right,
        # bottom-left, bottom-right.
        start_points = self.backend.convert_to_tensor(
            [
                [
                    [0.0, 0.0],
                    [width - 1, 0.0],
                    [0.0, height - 1],
                    [width - 1, height - 1],
                ]
            ],
            dtype=self.compute_dtype,
        )
        start_points = self.backend.numpy.repeat(
            start_points, batch_size, axis=0
        )
        # Offsets scale with the corner coordinates, so the (0, 0) corner
        # is always a fixed point of the sampled warp.
        end_points = start_points + start_points * perspective_factor
        return {
            "apply_perspective": apply_perspective,
            "start_points": start_points,
            "end_points": end_points,
            "input_shape": images_shape,
        }

    def transform_images(self, images, transformation, training=True):
        """Warp images during training; pass them through otherwise."""
        images = self.backend.cast(images, self.compute_dtype)
        if training and transformation is not None:
            images = self._perspective_inputs(images, transformation)
        images = self.backend.cast(images, self.compute_dtype)
        return images

    def _perspective_inputs(self, inputs, transformation):
        """Apply the sampled warp, honoring the per-image apply mask."""
        if transformation is None:
            return inputs
        inputs_shape = self.backend.shape(inputs)
        unbatched = len(inputs_shape) == 3
        if unbatched:
            # Temporarily add a batch dimension so the batched warp applies.
            inputs = self.backend.numpy.expand_dims(inputs, axis=0)
        start_points = transformation["start_points"]
        end_points = transformation["end_points"]
        outputs = self.backend.image.perspective_transform(
            inputs,
            start_points,
            end_points,
            interpolation=self.interpolation,
            fill_value=self.fill_value,
            data_format=self.data_format,
        )
        apply_perspective = transformation["apply_perspective"]
        # Select, per image, the warped or the original tensor. The mask is
        # broadcast over the three non-batch dimensions of the 4D batch.
        outputs = self.backend.numpy.where(
            apply_perspective[:, None, None, None],
            outputs,
            inputs,
        )
        if unbatched:
            outputs = self.backend.numpy.squeeze(outputs, axis=0)
        return outputs

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Map bounding boxes through the same warp applied to the images."""
        if training and transformation is not None:
            # Inside a tf.data graph, ops must run through the TF backend.
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")
            input_height, input_width = (
                transformation["input_shape"][self.height_axis],
                transformation["input_shape"][self.width_axis],
            )
            # Work in pixel-space "xyxy" coordinates.
            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="xyxy",
                height=input_height,
                width=input_width,
            )
            boxes = bounding_boxes["boxes"]
            x0, y0, x1, y1 = self.backend.numpy.split(boxes, 4, axis=-1)
            start_points = transformation["start_points"]
            end_points = transformation["end_points"]
            transform = self.backend.image.compute_homography_matrix(
                start_points, end_points
            )
            transform = self.backend.numpy.expand_dims(transform, axis=1)
            transform = self.backend.cast(transform, dtype=self.compute_dtype)
            # Transform all four corners of every box, then take the
            # axis-aligned bounding box of the warped corners.
            corners = [
                self._get_transformed_coordinates(x, y, transform)
                for x, y in [(x0, y0), (x1, y1), (x0, y1), (x1, y0)]
            ]
            x_corners, y_corners = zip(*corners)
            xs = self.backend.numpy.stack(x_corners, axis=-1)
            ys = self.backend.numpy.stack(y_corners, axis=-1)
            min_x, max_x = (
                self.backend.numpy.min(xs, axis=-1),
                self.backend.numpy.max(xs, axis=-1),
            )
            min_y, max_y = (
                self.backend.numpy.min(ys, axis=-1),
                self.backend.numpy.max(ys, axis=-1),
            )
            min_x = self.backend.numpy.expand_dims(min_x, axis=-1)
            max_x = self.backend.numpy.expand_dims(max_x, axis=-1)
            min_y = self.backend.numpy.expand_dims(min_y, axis=-1)
            max_y = self.backend.numpy.expand_dims(max_y, axis=-1)
            boxes = self.backend.numpy.concatenate(
                [min_x, min_y, max_x, max_y], axis=-1
            )
            # Keep original boxes for images the warp was not applied to.
            apply_perspective = self.backend.core.convert_to_tensor(
                transformation["apply_perspective"], dtype=boxes.dtype
            )
            bounding_boxes["boxes"] = self.backend.numpy.where(
                apply_perspective[:, None, None],
                boxes,
                bounding_boxes["boxes"],
            )
            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=input_height,
                width=input_width,
                bounding_box_format="xyxy",
            )
            # NOTE(review): boxes are returned in "xyxy" without converting
            # back to self.bounding_box_format — confirm callers expect this.
            self.backend.reset()
        return bounding_boxes

    def _get_transformed_coordinates(
        self, x_coords, y_coords, transformation_matrix
    ):
        """Map (x, y) coordinates through the inverse of the homography.

        `transformation_matrix` holds the 8 homography parameters; a
        trailing 1 is appended to form the full 3x3 matrix before inversion.
        Returns the transformed x and y coordinates after the perspective
        divide.
        """
        backend = self.backend
        batch_size = backend.shape(transformation_matrix)[0]
        homogeneous_transform = backend.numpy.concatenate(
            [transformation_matrix, backend.numpy.ones((batch_size, 1, 1))],
            axis=-1,
        )
        homogeneous_transform = backend.numpy.reshape(
            homogeneous_transform, (batch_size, 3, 3)
        )
        inverse_transform = backend.linalg.inv(homogeneous_transform)
        # Promote (x, y) to homogeneous coordinates (x, y, 1).
        ones_column = backend.numpy.ones_like(x_coords)
        homogeneous_coords = backend.numpy.concatenate(
            [x_coords, y_coords, ones_column], axis=-1
        )
        homogeneous_coords = backend.numpy.moveaxis(homogeneous_coords, -1, -2)
        transformed_coords = backend.numpy.matmul(
            inverse_transform, homogeneous_coords
        )
        transformed_coords = backend.numpy.moveaxis(transformed_coords, -1, -2)
        # Perspective divide by the homogeneous w component.
        x_transformed = transformed_coords[..., 0] / transformed_coords[..., 2]
        y_transformed = transformed_coords[..., 1] / transformed_coords[..., 2]
        return x_transformed, y_transformed

    def transform_labels(self, labels, transformation, training=True):
        # Class labels are unaffected by a geometric warp.
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # NOTE(review): masks are warped with the same interpolation as the
        # images; with "bilinear" this can blend class ids — confirm intended.
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def compute_output_shape(self, input_shape):
        # A perspective warp does not change the image shape.
        return input_shape

    def get_config(self):
        """Return the layer configuration for serialization."""
        base_config = super().get_config()
        config = {
            "factor": self.factor,
            "scale": self.scale,
            "interpolation": self.interpolation,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/preprocessing/image_preprocessing/mix_up.py | keras/src/layers/preprocessing/image_preprocessing/mix_up.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random import SeedGenerator
from keras.src.utils import backend_utils
@keras_export("keras.layers.MixUp")
class MixUp(BaseImagePreprocessingLayer):
    """MixUp implements the MixUp data augmentation technique.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    References:
        - [MixUp paper](https://arxiv.org/abs/1710.09412).
        - [MixUp for Object Detection paper](https://arxiv.org/pdf/1902.04103).

    Args:
        alpha: Float between 0 and 1. Controls the blending strength.
            Smaller values mean less mixing, while larger values allow
            for more blending between images. Defaults to 0.2,
            recommended for ImageNet1k classification.
        seed: Integer. Used to create a random seed.

    Example:
    ```python
    (images, labels), _ = keras.datasets.cifar10.load_data()
    images, labels = images[:8], labels[:8]
    labels = keras.ops.cast(keras.ops.one_hot(labels.flatten(), 10), "float32")
    mix_up = keras.layers.MixUp(alpha=0.2)
    output = mix_up({"images": images, "labels": labels})
    ```
    """

    def __init__(self, alpha=0.2, data_format=None, seed=None, **kwargs):
        super().__init__(data_format=data_format, **kwargs)
        # Both Beta distribution parameters are `alpha`, giving a symmetric
        # mix-weight distribution.
        self.alpha = alpha
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample a batch permutation and per-image Beta mix weights."""
        # `data` is either a dict ({"images": ..., ...}) or a bare tensor.
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        # Rank 3 means a single unbatched image.
        if len(images_shape) == 3:
            batch_size = 1
        else:
            batch_size = self.backend.shape(images)[0]
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # Each image is mixed with the image at its permuted index.
        permutation_order = self.backend.random.shuffle(
            self.backend.numpy.arange(0, batch_size, dtype="int64"),
            seed=seed,
        )
        # Per-image blend factor drawn from Beta(alpha, alpha).
        mix_weight = self.backend.random.beta(
            (batch_size,), self.alpha, self.alpha, seed=seed
        )
        return {
            "mix_weight": mix_weight,
            "permutation_order": permutation_order,
        }

    def transform_images(self, images, transformation=None, training=True):
        """Blend each image with its permuted partner during training."""
        def _mix_up_input(images, transformation):
            images = self.backend.cast(images, self.compute_dtype)
            mix_weight = transformation["mix_weight"]
            permutation_order = transformation["permutation_order"]
            # Reshape to (batch, 1, 1, 1) so the weight broadcasts over
            # the spatial and channel dimensions.
            mix_weight = self.backend.cast(
                self.backend.numpy.reshape(mix_weight, [-1, 1, 1, 1]),
                dtype=self.compute_dtype,
            )
            mix_up_images = self.backend.cast(
                self.backend.numpy.take(images, permutation_order, axis=0),
                dtype=self.compute_dtype,
            )
            images = mix_weight * images + (1.0 - mix_weight) * mix_up_images
            return images

        if training:
            images = _mix_up_input(images, transformation)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Blend label vectors with the same weights used for the images."""
        def _mix_up_labels(labels, transformation):
            mix_weight = transformation["mix_weight"]
            permutation_order = transformation["permutation_order"]
            labels_for_mix_up = self.backend.numpy.take(
                labels, permutation_order, axis=0
            )
            mix_weight = self.backend.numpy.reshape(mix_weight, [-1, 1])
            labels = (
                mix_weight * labels + (1.0 - mix_weight) * labels_for_mix_up
            )
            return labels

        if training:
            labels = _mix_up_labels(labels, transformation)
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Union each image's boxes with those of its permuted partner."""
        def _mix_up_bounding_boxes(bounding_boxes, transformation):
            # Inside a tf.data graph, ops must run through the TF backend.
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")
            permutation_order = transformation["permutation_order"]
            # Make sure we are on cpu for torch tensors.
            permutation_order = ops.convert_to_numpy(permutation_order)
            boxes, labels = bounding_boxes["boxes"], bounding_boxes["labels"]
            boxes_for_mix_up = self.backend.numpy.take(
                boxes, permutation_order, axis=0
            )
            labels_for_mix_up = self.backend.numpy.take(
                labels, permutation_order, axis=0
            )
            # NOTE(review): boxes are concatenated along axis=1 (per-image
            # box dimension) but labels along axis=0 (batch dimension) —
            # this looks inconsistent; confirm the intended label layout.
            boxes = self.backend.numpy.concatenate(
                [boxes, boxes_for_mix_up], axis=1
            )
            labels = self.backend.numpy.concatenate(
                [labels, labels_for_mix_up], axis=0
            )
            self.backend.reset()
            return {"boxes": boxes, "labels": labels}

        if training:
            bounding_boxes = _mix_up_bounding_boxes(
                bounding_boxes, transformation
            )
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Blend segmentation masks with the same permutation and weights."""
        def _mix_up_segmentation_masks(segmentation_masks, transformation):
            mix_weight = transformation["mix_weight"]
            # Make sure we are on cpu for torch tensors.
            mix_weight = ops.convert_to_numpy(mix_weight)
            permutation_order = transformation["permutation_order"]
            mix_weight = self.backend.numpy.reshape(mix_weight, [-1, 1, 1, 1])
            # NOTE(review): `take` is called without `axis` here, unlike the
            # axis=0 used for images/labels; with a NumPy-style default
            # (axis=None) this would index the flattened tensor — confirm.
            segmentation_masks_for_mix_up = self.backend.numpy.take(
                segmentation_masks, permutation_order
            )
            segmentation_masks = (
                mix_weight * segmentation_masks
                + (1.0 - mix_weight) * segmentation_masks_for_mix_up
            )
            return segmentation_masks

        if training:
            segmentation_masks = _mix_up_segmentation_masks(
                segmentation_masks, transformation
            )
        return segmentation_masks

    def compute_output_shape(self, input_shape):
        # Blending does not change tensor shapes.
        return input_shape

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "alpha": self.alpha,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.