repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/integration_tests/dataset_tests/california_housing_test.py | integration_tests/dataset_tests/california_housing_test.py | from keras.src import testing
from keras.src.datasets import california_housing
class CaliforniaHousingTest(testing.TestCase):
def test_load_data_large(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="large"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the dataset contains 20,640 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 20640)
def test_load_data_small(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="small"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the small dataset contains 600 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 600)
def test_invalid_version(self):
with self.assertRaises(ValueError):
california_housing.load_data(version="invalid_version")
def test_seed_reproducibility(self):
# Ensure the data is reproducible with the same seed
seed = 123
first_load = california_housing.load_data(version="large", seed=seed)
second_load = california_housing.load_data(version="large", seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/integration_tests/dataset_tests/cifar10_test.py | integration_tests/dataset_tests/cifar10_test.py | import numpy as np
from keras.src import testing
from keras.src.datasets import cifar10
class Cifar10LoadDataTest(testing.TestCase):
def test_x_train_shape(self):
(x_train, _), _ = cifar10.load_data()
self.assertEqual(x_train.shape, (50000, 32, 32, 3))
def test_y_train_shape(self):
(_, y_train), _ = cifar10.load_data()
self.assertEqual(y_train.shape, (50000, 1))
def test_x_test_shape(self):
_, (x_test, _) = cifar10.load_data()
self.assertEqual(x_test.shape, (10000, 32, 32, 3))
def test_y_test_shape(self):
_, (_, y_test) = cifar10.load_data()
self.assertEqual(y_test.shape, (10000, 1))
def test_x_train_dtype(self):
(x_train, _), _ = cifar10.load_data()
self.assertEqual(x_train.dtype, np.uint8)
def test_y_train_dtype(self):
(_, y_train), _ = cifar10.load_data()
self.assertEqual(y_train.dtype, np.uint8)
def test_x_test_dtype(self):
_, (x_test, _) = cifar10.load_data()
self.assertEqual(x_test.dtype, np.uint8)
def test_y_test_dtype(self):
_, (_, y_test) = cifar10.load_data()
self.assertEqual(y_test.dtype, np.uint8)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/integration_tests/dataset_tests/fashion_mnist_test.py | integration_tests/dataset_tests/fashion_mnist_test.py | import numpy as np
from keras.src import testing
from keras.src.datasets import fashion_mnist
class FashionMnistLoadDataTest(testing.TestCase):
def test_x_train_shape(self):
(x_train, _), _ = fashion_mnist.load_data()
self.assertEqual(x_train.shape, (60000, 28, 28))
def test_y_train_shape(self):
(_, y_train), _ = fashion_mnist.load_data()
self.assertEqual(y_train.shape, (60000,))
def test_x_test_shape(self):
_, (x_test, _) = fashion_mnist.load_data()
self.assertEqual(x_test.shape, (10000, 28, 28))
def test_y_test_shape(self):
_, (_, y_test) = fashion_mnist.load_data()
self.assertEqual(y_test.shape, (10000,))
def test_x_train_dtype(self):
(x_train, _), _ = fashion_mnist.load_data()
self.assertEqual(x_train.dtype, np.uint8)
def test_y_train_dtype(self):
(_, y_train), _ = fashion_mnist.load_data()
self.assertEqual(y_train.dtype, np.uint8)
def test_x_test_dtype(self):
_, (x_test, _) = fashion_mnist.load_data()
self.assertEqual(x_test.dtype, np.uint8)
def test_y_test_dtype(self):
_, (_, y_test) = fashion_mnist.load_data()
self.assertEqual(y_test.dtype, np.uint8)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/integration_tests/dataset_tests/boston_housing_test.py | integration_tests/dataset_tests/boston_housing_test.py | from keras.src import testing
from keras.src.datasets import boston_housing
class BostonHousingTest(testing.TestCase):
def test_load_data(self):
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
self.assertEqual(x_train.shape[1], 13)
self.assertEqual(x_train.shape[0] + x_test.shape[0], 506)
def test_seed_reproducibility(self):
seed = 123
first_load = boston_housing.load_data(seed=seed)
second_load = boston_housing.load_data(seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
def test_invalid_test_split(self):
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=-0.1)
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=1.0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_jax_distributed.py | examples/demo_jax_distributed.py | # To run this demo, you will need to spin up a "TPU VM" on Google Cloud.
# Please follow instructions here: https://cloud.google.com/tpu/docs/run-calculation-jax
# Force a JAX backend
import os, pprint, collections
os.environ["KERAS_BACKEND"] = "jax"
pp = pprint.PrettyPrinter()
import jax
import jax.numpy as jnp
import tensorflow as tf # just for tf.data
import keras # Keras multi-backend
import numpy as np
from tqdm import tqdm
from jax.experimental import mesh_utils
from jax.sharding import Mesh
from jax.sharding import NamedSharding
from jax.sharding import PartitionSpec as P
""" Dataset
Classic MNIST, loaded using tf.data
"""
BATCH_SIZE = 192
(
(x_train, train_labels),
(x_eval, eval_labels),
) = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1).astype(
np.float32
) # from 28x28 to 28x28x1 (add a single B&W color channel)
x_eval = np.expand_dims(x_eval, axis=-1).astype(np.float32)
train_data = tf.data.Dataset.from_tensor_slices((x_train, train_labels))
train_data = train_data.shuffle(5000, reshuffle_each_iteration=True)
train_data = train_data.batch(BATCH_SIZE, drop_remainder=True)
train_data = train_data.repeat()
eval_data = tf.data.Dataset.from_tensor_slices((x_eval, eval_labels))
eval_data = eval_data.batch(10000) # everything as one batch
STEPS_PER_EPOCH = len(train_labels) // BATCH_SIZE
""" Keras model
Simple but non-trivial model with:
* Batch Normalization (non-trainable state updated during training, different training-time and inference behavior)
* Dropout (randomness, different training-time and inference behavior)
"""
# Keras "sequential" model building style
def make_backbone():
return keras.Sequential(
[
keras.layers.Rescaling(
1.0 / 255.0
), # input images are in the range [0, 255]
keras.layers.Conv2D(
filters=12, kernel_size=3, padding="same", use_bias=False
),
keras.layers.BatchNormalization(scale=False, center=True),
keras.layers.Activation("relu"),
keras.layers.Conv2D(
filters=24,
kernel_size=6,
padding="same",
use_bias=False,
strides=2,
),
keras.layers.BatchNormalization(scale=False, center=True),
keras.layers.Activation("relu"),
keras.layers.Conv2D(
filters=32,
kernel_size=6,
padding="same",
use_bias=False,
strides=2,
name="large_k",
),
keras.layers.BatchNormalization(scale=False, center=True),
keras.layers.Activation("relu"),
],
name="backbone",
)
def make_model():
input = keras.Input(shape=[28, 28, 1])
y = make_backbone()(input)
y = keras.layers.Flatten()(y)
y = keras.layers.Dense(200, activation="relu")(y)
y = keras.layers.Dropout(0.4)(y)
y = keras.layers.Dense(10, activation="softmax")(y)
model = keras.Model(inputs=input, outputs=y)
return model
""" JAX-native distribution with a Keras model
For now, you have to write a custom training loop for this.
Note: The features required by jax.sharding are not supported by the Colab TPU
runtime at this time, but are available on Cloud TPU VMs and Kaggle TPU VMs.
"""
if len(jax.local_devices()) < 8:
raise Exception("This part requires 8 devices to run")
else:
print("\nIdentified local devices:")
pp.pprint(jax.local_devices())
# ----------------- Keras ---------------------
# instantiate the model
model = make_model()
# learning rate
lr = keras.optimizers.schedules.ExponentialDecay(0.01, STEPS_PER_EPOCH, 0.6)
# optimizer
optimizer = keras.optimizers.Adam(lr)
# initialize all state with .build()
(one_batch, one_batch_labels) = next(iter(train_data))
model.build(one_batch)
optimizer.build(model.trainable_variables)
""" Distribution settings
* Sharding the data on the batch axis
* Replicating all model variables
Note: this implements standard "data parallel" distributed training
* Just for show, sharding the largest convolutional kernel along the
"channels" axis 4-ways and replicating 2-ways
Note: this does not reflect a best practice but is intended to show
that you can split a very large kernel across multiple devices
if you have to
"""
print(
"\nMostly data-parallel distribution. "
"Data is sharded across devices while the model is replicated. "
"For demo purposes, we split the largest kernel 4-ways "
"(and replicate 2-ways since we have 8 devices)."
)
# ------------------ Jax ----------------------
devices = mesh_utils.create_device_mesh((8,))
# data will be split along the batch axis
data_mesh = Mesh(devices, axis_names=("batch",)) # naming axes of the mesh
# naming axes of the sharded partition
data_sharding = NamedSharding(
data_mesh,
P(
"batch",
),
)
# all variables will be replicated on all devices
var_mesh = Mesh(devices, axis_names=("_",))
# in NamedSharding, axes that are not mentioned are replicated (all axes here)
var_replication = NamedSharding(var_mesh, P())
# for the demo, we will split the largest kernel 4-ways (and replicate 2-ways since we have 8 devices)
large_kernel_mesh = Mesh(
devices.reshape((-1, 4)), axis_names=(None, "out_chan")
) # naming axes of the mesh
large_kernel_sharding = NamedSharding(
large_kernel_mesh, P(None, None, None, "out_chan")
) # naming axes of the sharded partition
# ----------------- Keras ---------------------
# Use Keras APIs to find the variable of a specific layer (we will be sharding this one in a special way)
# In a Conv2D or Dense layer, the variables are 'kernel' and 'bias'
special_layer_var = model.get_layer("backbone").get_layer("large_k").kernel
# ------------------ Jax ----------------------
# - Keras exposes the variable lists model.trainable_variables,
#   model.non_trainable_variables, and optimizer.variables
# Apply the distribution settings to the model variables
non_trainable_variables = jax.device_put(
model.non_trainable_variables, var_replication
)
optimizer_variables = jax.device_put(optimizer.variables, var_replication)
# this is what you would do to replicate all trainable variables:
# trainable_variables = jax.device_put(model.trainable_variables, var_replication)
# For the demo, we split the largest kernel 4-ways instead of replicating it.
# We still replicate all other trainable variables as in standard "data-parallel"
# distributed training.
print_once = True
trainable_variables = model.trainable_variables
for i, v in enumerate(trainable_variables):
if v is special_layer_var:
# Apply distribution settings: sharding
sharded_v = jax.device_put(v, large_kernel_sharding)
trainable_variables[i] = sharded_v
print("Sharding of convolutional", v.name, v.shape)
jax.debug.visualize_array_sharding(
jnp.reshape(sharded_v, [-1, v.shape[-1]])
)
else:
# Apply distribution settings: replication
replicated_v = jax.device_put(v, var_replication)
trainable_variables[i] = replicated_v
if print_once:
print_once = False
print(
"\nSharding of all other model variables (they are replicated)"
)
jax.debug.visualize_array_sharding(
jnp.reshape(replicated_v, [-1, v.shape[-1]])
)
# collect state in a handy named tuple
TrainingState = collections.namedtuple(
"TrainingState",
["trainable_variables", "non_trainable_variables", "optimizer_variables"],
)
device_train_state = TrainingState(
trainable_variables=trainable_variables,
non_trainable_variables=non_trainable_variables,
optimizer_variables=optimizer_variables,
)
# display data sharding
x, y = next(iter(train_data))
sharded_x = jax.device_put(x.numpy(), data_sharding)
print("Data sharding")
jax.debug.visualize_array_sharding(jnp.reshape(sharded_x, [-1, 28 * 28]))
# ------------------ Jax ----------------------
# - Using Keras-provided stateless APIs
# - model.stateless_call
# - optimizer.stateless_apply
# These functions also work on other backends.
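# Sketch of the stateless contract (editor's reading of the calls below):
#   y_pred, new_non_trainable = model.stateless_call(
#       trainable_variables, non_trainable_variables, x)
#   new_trainable, new_opt_vars = optimizer.stateless_apply(
#       optimizer_variables, grads, trainable_variables)
# Nothing is mutated in place, which is what makes train_step jit-able.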
# define loss
loss = keras.losses.SparseCategoricalCrossentropy()
# This is the loss function that will be differentiated.
# Keras provides a pure functional forward pass: model.stateless_call
def compute_loss(trainable_variables, non_trainable_variables, x, y):
y_pred, updated_non_trainable_variables = model.stateless_call(
trainable_variables, non_trainable_variables, x
)
loss_value = loss(y, y_pred)
return loss_value, updated_non_trainable_variables
# function to compute gradients
compute_gradients = jax.value_and_grad(compute_loss, has_aux=True)
# Training step: Keras provides a pure functional optimizer.stateless_apply
@jax.jit
def train_step(train_state, x, y):
(loss_value, non_trainable_variables), grads = compute_gradients(
train_state.trainable_variables,
train_state.non_trainable_variables,
x,
y,
)
trainable_variables, optimizer_variables = optimizer.stateless_apply(
train_state.optimizer_variables, grads, train_state.trainable_variables
)
return loss_value, TrainingState(
trainable_variables, non_trainable_variables, optimizer_variables
)
# training loop
EPOCHS = 5
print("\nTraining:")
data_iter = iter(train_data)
for epoch in range(EPOCHS):
loss_value = None # default
for i in tqdm(range(STEPS_PER_EPOCH)):
x, y = next(data_iter)
sharded_x = jax.device_put(x.numpy(), data_sharding)
loss_value, device_train_state = train_step(
device_train_state, sharded_x, y.numpy()
)
print("Epoch", epoch, "loss:", loss_value)
# The output of the model is still sharded. Sharding follows the data.
data, labels = next(iter(eval_data))
sharded_data = jax.device_put(data.numpy(), data_sharding)
@jax.jit
def predict(data):
predictions, updated_non_trainable_variables = model.stateless_call(
device_train_state.trainable_variables,
device_train_state.non_trainable_variables,
data,
)
return predictions
predictions = predict(sharded_data)
print("\nModel output sharding follows data sharding:")
jax.debug.visualize_array_sharding(predictions)
# Post-processing: write the updated state back into the model variables
update = lambda variable, value: variable.assign(value)
jax.tree_map(
update, model.trainable_variables, device_train_state.trainable_variables
)
jax.tree_map(
update,
model.non_trainable_variables,
device_train_state.non_trainable_variables,
)
jax.tree_map(
update, optimizer.variables, device_train_state.optimizer_variables
)
# check that the model has the new state by running an eval
# known issue: the optimizer should not be required here
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
print("\nUpdating model and running an eval:")
loss, accuracy = model.evaluate(eval_data)
print("The model achieved an evaluation accuracy of:", accuracy)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_custom_tf_workflow.py | examples/demo_custom_tf_workflow.py | # flake8: noqa
import os
# Set backend env to tensorflow
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import tensorflow as tf
from keras import Model
from keras import backend
from keras import initializers
from keras import layers
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
w_shape = (input_dim, self.units)
w_value = initializers.GlorotUniform()(w_shape)
self.w = backend.Variable(w_value, name="kernel")
b_shape = (self.units,)
b_value = initializers.Zeros()(b_shape)
self.b = backend.Variable(b_value, name="bias")
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
def call(self, x):
x = tf.nn.relu(self.dense1(x))
x = tf.nn.relu(self.dense2(x))
return self.dense3(x)
def Dataset():
for _ in range(20):
yield (
np.random.random((32, 128)).astype("float32"),
np.random.random((32, 4)).astype("float32"),
)
def loss_fn(y_true, y_pred):
return ops.sum((y_true - y_pred) ** 2)
model = MyModel(hidden_dim=256, output_dim=4)
optimizer = optimizers.SGD(learning_rate=0.001)
dataset = Dataset()
######### Custom TF workflow ###############
@tf.function(jit_compile=True)
def train_step(data):
x, y = data
with tf.GradientTape() as tape:
y_pred = model(x)
loss = loss_fn(y, y_pred)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
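# Editor's note: jit_compile=True asks tf.function to compile the step with
# XLA; tape.gradient works on model.trainable_variables because the
# TensorFlow backend backs each Keras variable with a tf.Variable.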
for data in dataset:
loss = train_step(data)
print("Loss:", float(loss))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_custom_jax_workflow.py | examples/demo_custom_jax_workflow.py | # flake8: noqa
import os
# Set backend env to JAX
os.environ["KERAS_BACKEND"] = "jax"
import jax
import numpy as np
from keras import Model
from keras import backend
from keras import initializers
from keras import layers
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
w_shape = (input_dim, self.units)
w_value = initializers.GlorotUniform()(w_shape)
self.w = backend.Variable(w_value, name="kernel")
b_shape = (self.units,)
b_value = initializers.Zeros()(b_shape)
self.b = backend.Variable(b_value, name="bias")
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
def call(self, x):
x = jax.nn.relu(self.dense1(x))
x = jax.nn.relu(self.dense2(x))
return self.dense3(x)
def Dataset():
for _ in range(20):
yield (np.random.random((32, 128)), np.random.random((32, 4)))
def loss_fn(y_true, y_pred):
return ops.sum((y_true - y_pred) ** 2)
model = MyModel(hidden_dim=256, output_dim=4)
optimizer = optimizers.SGD(learning_rate=0.001)
dataset = Dataset()
# Build model
x = np.random.random((1, 128))
model(x)
# Build optimizer
optimizer.build(model.trainable_variables)
######### Custom JAX workflow ###############
def compute_loss_and_updates(
trainable_variables, non_trainable_variables, x, y
):
y_pred, non_trainable_variables = model.stateless_call(
trainable_variables, non_trainable_variables, x
)
loss = loss_fn(y, y_pred)
return loss, non_trainable_variables
grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)
@jax.jit
def train_step(state, data):
trainable_variables, non_trainable_variables, optimizer_variables = state
x, y = data
(loss, non_trainable_variables), grads = grad_fn(
trainable_variables, non_trainable_variables, x, y
)
trainable_variables, optimizer_variables = optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
# Return updated state
return loss, (
trainable_variables,
non_trainable_variables,
optimizer_variables,
)
trainable_variables = model.trainable_variables
non_trainable_variables = model.non_trainable_variables
optimizer_variables = optimizer.variables
state = trainable_variables, non_trainable_variables, optimizer_variables
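# Editor's note: the training state is threaded functionally; each call to
# train_step consumes the previous state and returns an updated copy, while
# the variables on `model` itself are only written back after the loop.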
# Training loop
for data in dataset:
loss, state = train_step(state, data)
print("Loss:", loss)
# Post-processing model state update
trainable_variables, non_trainable_variables, optimizer_variables = state
for variable, value in zip(model.trainable_variables, trainable_variables):
variable.assign(value)
for variable, value in zip(
model.non_trainable_variables, non_trainable_variables
):
variable.assign(value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_subclass.py | examples/demo_subclass.py | import numpy as np
from keras import Model
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 6
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
print("History:")
print(history.history)
model.summary()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_functional.py | examples/demo_functional.py | import numpy as np
from keras import Model
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
import keras
keras.config.disable_traceback_filtering()
inputs = layers.Input((100,))
x = layers.Dense(512, activation="relu")(inputs)
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
x = layers.Dense(512, activation="relu")(x)
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
outputs = layers.Dense(16)(x)
model = Model(inputs, outputs)
model.summary()
x = np.random.random((50000, 100))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 5
model.compile(
optimizer=optimizers.Adam(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[
metrics.CategoricalAccuracy(name="acc"),
metrics.MeanSquaredError(name="mse"),
],
)
print("\nTrain model")
history = model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
print("\nHistory:")
print(history.history)
print("\nEvaluate model")
scores = model.evaluate(x, y, return_dict=True)
print(scores)
print("\nRun inference")
pred = model.predict(x)
print(f"Inferred output shape {pred.shape}")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_mnist_convnet.py | examples/demo_mnist_convnet.py | import numpy as np
import keras
from keras import layers
from keras.utils import to_categorical
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
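# Editor's example: to_categorical(np.array([0, 2]), num_classes=10) returns
# the one-hot matrix [[1, 0, 0, ..., 0], [0, 0, 1, 0, ..., 0]] of shape (2, 10).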
batch_size = 128
epochs = 3
model = keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1
)
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_custom_torch_workflow.py | examples/demo_custom_torch_workflow.py | # flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras import layers
import keras
import numpy as np
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 64
num_epochs = 1
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create the Keras model
model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
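# Editor's note: nn.CrossEntropyLoss fuses log-softmax with negative
# log-likelihood, which is why the final Dense(num_classes) layer above has
# no softmax activation and outputs raw logits.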
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch + 1}/{num_epochs}], "
f"Batch [{batch_idx + 1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
train(model, train_loader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
torch_module = MyModel()
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(torch_module, train_loader, num_epochs, optimizer, loss_fn)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_custom_layer_backend_agnostic.py | examples/demo_custom_layer_backend_agnostic.py | import numpy as np
import keras
from keras import Model
from keras import initializers
from keras import layers
from keras import losses
from keras import metrics
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=(input_dim, self.units),
initializer=initializers.GlorotNormal(),
name="kernel",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=initializers.Zeros(),
name="bias",
trainable=True,
)
def call(self, inputs):
# Use Keras ops to create backend-agnostic layers/metrics/etc.
return ops.matmul(inputs, self.w) + self.b
class MyDropout(layers.Layer):
def __init__(self, rate, name=None):
super().__init__(name=name)
self.rate = rate
# Use seed_generator for managing RNG state.
# It is a state element and its seed variable is
# tracked as part of `layer.variables`.
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
# Use `keras.random` for random ops.
return keras.random.dropout(inputs, self.rate, seed=self.seed_generator)
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
self.dp = MyDropout(0.5)
def call(self, x):
x1 = self.dense1(x)
x2 = self.dense2(x)
# Why not use some ops here as well
x = ops.concatenate([x1, x2], axis=-1)
x = self.dp(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 5
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)
model.summary()
print("History:")
print(history.history)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/examples/demo_torch_multi_gpu.py | examples/demo_torch_multi_gpu.py | # flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras import layers
import keras
import numpy as np
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 128
num_epochs = 1
def get_data():
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
return dataset
def get_model():
# Create the Keras model
model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
return model
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch + 1}/{num_epochs}], "
f"Batch [{batch_idx + 1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
def setup(current_gpu_index, num_gpu):
# Device setup
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "56492"
device = torch.device("cuda:{}".format(current_gpu_index))
dist.init_process_group(
backend="nccl",
init_method="env://",
world_size=num_gpu,
rank=current_gpu_index,
)
torch.cuda.set_device(device)
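# Editor's note: MASTER_ADDR/MASTER_PORT define the rendezvous point that
# init_method="env://" reads, so every process spawned by
# torch.multiprocessing joins the same NCCL process group under its own rank.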
def prepare(dataset, current_gpu_index, num_gpu, batch_size):
sampler = DistributedSampler(
dataset,
num_replicas=num_gpu,
rank=current_gpu_index,
shuffle=False,
)
# Create a DataLoader
train_loader = DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=False,
)
return train_loader
def cleanup():
# Cleanup
dist.destroy_process_group()
def main(current_gpu_index, num_gpu):
# setup the process groups
setup(current_gpu_index, num_gpu)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
dataset = get_data()
model = get_model()
# prepare the dataloader
dataloader = prepare(dataset, current_gpu_index, num_gpu, batch_size)
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
# Put model on device
model = model.to(current_gpu_index)
ddp_model = DDP(
model, device_ids=[current_gpu_index], output_device=current_gpu_index
)
train(ddp_model, dataloader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
torch_module = MyModel().to(current_gpu_index)
ddp_torch_module = DDP(
torch_module,
device_ids=[current_gpu_index],
output_device=current_gpu_index,
)
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(ddp_torch_module, dataloader, num_epochs, optimizer, loss_fn)
cleanup()
if __name__ == "__main__":
# GPU parameters
num_gpu = torch.cuda.device_count()
print(f"Running on {num_gpu} GPUs")
torch.multiprocessing.spawn(
main,
args=(num_gpu,),
nprocs=num_gpu,
join=True,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/__init__.py | keras/__init__.py | # This file should NEVER be packaged! This is a hack to make "import keras" from
# the base of the repo just import the source files. We'll keep it for compat.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
from keras.api import * # noqa: F403, E402
from keras.api import __version__ # noqa: E402
# Don't pollute namespace.
del os
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/version.py | keras/src/version.py | from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.14.0"
@keras_export("keras.version")
def version():
return __version__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/__init__.py | keras/src/__init__.py | from keras.src import activations
from keras.src import applications
from keras.src import backend
from keras.src import constraints
from keras.src import datasets
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import regularizers
from keras.src import utils
from keras.src import visualization
from keras.src.backend import KerasTensor
from keras.src.layers import Input
from keras.src.layers import Layer
from keras.src.models import Functional
from keras.src.models import Model
from keras.src.models import Sequential
from keras.src.version import __version__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/api_export.py | keras/src/api_export.py | try:
import namex
except ImportError:
namex = None
# These dicts reference "canonical names" only
# (i.e. the first name an object was registered with).
REGISTERED_NAMES_TO_OBJS = {}
REGISTERED_OBJS_TO_NAMES = {}
def register_internal_serializable(path, symbol):
global REGISTERED_NAMES_TO_OBJS
if isinstance(path, (list, tuple)):
name = path[0]
else:
name = path
REGISTERED_NAMES_TO_OBJS[name] = symbol
REGISTERED_OBJS_TO_NAMES[symbol] = name
def get_symbol_from_name(name):
return REGISTERED_NAMES_TO_OBJS.get(name, None)
def get_name_from_symbol(symbol):
return REGISTERED_OBJS_TO_NAMES.get(symbol, None)
if namex:
class keras_export(namex.export):
def __init__(self, path):
super().__init__(package="keras", path=path)
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return super().__call__(symbol)
else:
class keras_export:
def __init__(self, path):
self.path = path
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return symbol
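# Hypothetical usage sketch (editor's addition; `MyLayer` is illustrative):
# decorating a symbol registers it for serialization and, when namex is
# available, also re-exports it under the public `keras` namespace.
#
# @keras_export("keras.layers.MyLayer")
# class MyLayer(Layer):
#     ...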
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/random_initializers.py | keras/src/initializers/random_initializers.py | import math
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import random
from keras.src.initializers.initializer import Initializer
from keras.src.saving import serialization_lib
class RandomInitializer(Initializer):
def __init__(self, seed=None):
self._init_seed = seed
if seed is None:
seed = random.make_default_seed()
elif isinstance(seed, dict):
seed = serialization_lib.deserialize_keras_object(seed)
elif not isinstance(seed, (int, random.SeedGenerator)):
raise ValueError(
"`seed` argument should be an instance of "
"`keras.random.SeedGenerator()` or an integer. "
f"Received: seed={seed}"
)
self.seed = seed
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {"seed": seed_config}
@keras_export(
[
"keras.initializers.RandomNormal",
"keras.initializers.random_normal",
]
)
class RandomNormal(RandomInitializer):
"""Random normal initializer.
Draws samples from a normal distribution for given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
config = {"mean": self.mean, "stddev": self.stddev}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.TruncatedNormal",
"keras.initializers.truncated_normal",
]
)
class TruncatedNormal(RandomInitializer):
"""Initializer that generates a truncated normal distribution.
The values generated are similar to values from a
`RandomNormal` initializer, except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.truncated_normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
config = {"mean": self.mean, "stddev": self.stddev}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.RandomUniform",
"keras.initializers.random_uniform",
]
)
class RandomUniform(RandomInitializer):
"""Random uniform initializer.
Draws samples from a uniform distribution for given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar keras tensor. Lower bound of the
range of random values to generate (inclusive).
maxval: A python scalar or a scalar keras tensor. Upper bound of the
range of random values to generate (exclusive).
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.uniform(
shape=shape,
minval=self.minval,
maxval=self.maxval,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
config = {"minval": self.minval, "maxval": self.maxval}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.VarianceScaling",
"keras.initializers.variance_scaling",
]
)
class VarianceScaling(RandomInitializer):
"""Initializer that adapts its scale to the shape of its input tensors.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero
and a standard deviation (after truncation, if used) `stddev = sqrt(scale /
n)`, where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
Examples:
>>> # Standalone usage:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of `"fan_in"`, `"fan_out"`, `"fan_avg"`.
distribution: Random distribution to use.
One of `"truncated_normal"`, `"untruncated_normal"`, or `"uniform"`.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(
self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None,
):
if scale <= 0.0:
raise ValueError(
"Argument `scale` must be positive float. "
f"Received: scale={scale}"
)
allowed_modes = {"fan_in", "fan_out", "fan_avg"}
if mode not in allowed_modes:
raise ValueError(
f"Invalid `mode` argument: {mode}. "
f"Please use one of {allowed_modes}"
)
distribution = distribution.lower()
if distribution == "normal":
distribution = "truncated_normal"
allowed_distributions = {
"uniform",
"truncated_normal",
"untruncated_normal",
}
if distribution not in allowed_distributions:
raise ValueError(
f"Invalid `distribution` argument: {distribution}."
f"Please use one of {allowed_distributions}"
)
self.scale = scale
self.mode = mode
self.distribution = distribution
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
scale = self.scale
fan_in, fan_out = compute_fans(shape)
if self.mode == "fan_in":
scale /= max(1.0, fan_in)
elif self.mode == "fan_out":
scale /= max(1.0, fan_out)
else:
scale /= max(1.0, (fan_in + fan_out) / 2.0)
if self.distribution == "truncated_normal":
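            # The magic constant below is the standard deviation of a unit
            # normal truncated to [-2, 2]; dividing by it makes the samples'
            # post-truncation standard deviation equal sqrt(scale / n).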
stddev = math.sqrt(scale) / 0.87962566103423978
return random.truncated_normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return random.normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
else:
limit = math.sqrt(3.0 * scale)
return random.uniform(
shape, minval=-limit, maxval=limit, dtype=dtype, seed=self.seed
)
def get_config(self):
base_config = super().get_config()
config = {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.GlorotUniform",
"keras.initializers.glorot_uniform",
]
)
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input
units in the weight tensor and `fan_out` is the number of output units).
Examples:
>>> # Standalone usage:
>>> initializer = GlorotUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = GlorotUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_avg", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(
[
"keras.initializers.GlorotNormal",
"keras.initializers.glorot_normal",
]
)
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of
input units in the weight tensor and `fan_out` is the number of output units
in the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = GlorotNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = GlorotNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0,
mode="fan_avg",
distribution="truncated_normal",
seed=seed,
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(
[
"keras.initializers.LecunNormal",
"keras.initializers.lecun_normal",
]
)
class LecunNormal(VarianceScaling):
"""Lecun normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in
the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = LecunNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = LecunNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_in", distribution="truncated_normal", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(
[
"keras.initializers.LecunUniform",
"keras.initializers.lecun_uniform",
]
)
class LecunUniform(VarianceScaling):
"""Lecun uniform initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = LecunUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = LecunUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_in", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(["keras.initializers.HeNormal", "keras.initializers.he_normal"])
class HeNormal(VarianceScaling):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in
the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = HeNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [He et al., 2015](https://arxiv.org/abs/1502.01852)
"""
def __init__(self, seed=None):
super().__init__(
scale=2.0, mode="fan_in", distribution="truncated_normal", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(["keras.initializers.HeUniform", "keras.initializers.he_uniform"])
class HeUniform(VarianceScaling):
"""He uniform variance scaling initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = HeUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = HeUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [He et al., 2015](https://arxiv.org/abs/1502.01852)
"""
def __init__(self, seed=None):
super().__init__(
scale=2.0, mode="fan_in", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
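# Editor's note: a minimal sketch of the fan-in scaling shared by the
# classes above (illustrative only; assumes the sampled tensor converts
# to NumPy via `np.asarray`). For a (25, 20) kernel, fan_in = 25, so
# `HeUniform` draws from [-sqrt(6 / 25), sqrt(6 / 25)] and `HeNormal`
# uses stddev = sqrt(2 / 25) ~= 0.283:
#
# >>> import numpy as np
# >>> values = np.asarray(HeUniform(seed=0)(shape=(25, 20)))
# >>> bool(np.max(np.abs(values)) <= np.sqrt(6 / 25))
# True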
def compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple.
Returns:
A tuple of integer scalars: `(fan_in, fan_out)`.
"""
shape = tuple(shape)
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return int(fan_in), int(fan_out)
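# Editor's note: worked example for `compute_fans` (illustrative). A 3x3
# conv kernel with 8 input channels and 16 filters has shape (3, 3, 8, 16),
# so receptive_field_size = 3 * 3 = 9, fan_in = 8 * 9 = 72, and
# fan_out = 16 * 9 = 144:
#
# >>> compute_fans((3, 3, 8, 16))
# (72, 144)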
@keras_export(
[
"keras.initializers.Orthogonal",
"keras.initializers.orthogonal",
"keras.initializers.OrthogonalInitializer",
]
)
class Orthogonal(RandomInitializer):
"""Initializer that generates an orthogonal matrix.
If the shape of the tensor to initialize is two-dimensional, it is
initialized with an orthogonal matrix obtained from the QR decomposition of
a matrix of random numbers drawn from a normal distribution. If the matrix
has fewer rows than columns then the output will have orthogonal rows.
Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> # Standalone usage:
>>> initializer = keras.initializers.Orthogonal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = keras.initializers.Orthogonal()
>>> layer = keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the orthogonal matrix.
seed: A Python integer. Used to make the behavior of the initializer
deterministic.
Reference:
- [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
if len(shape) < 2:
raise ValueError(
"The tensor to initialize must be "
"at least two-dimensional. Received: "
f"shape={shape} of rank {len(shape)}."
)
        # Flatten the input shape, keeping the last dimension at its
        # original size, so this also works for convolution kernels.
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = random.normal(flat_shape, seed=self.seed, dtype=dtype)
# Compute the qr factorization
q, r = ops.qr(a)
# Make Q uniform
d = ops.diag(r)
q *= ops.sign(d)
if num_rows < num_cols:
q = ops.transpose(q)
return self.gain * ops.reshape(q, shape)
def get_config(self):
base_config = super().get_config()
config = {"gain": self.gain}
return {**base_config, **config}
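# Editor's note: quick orthogonality check (sketch; assumes the backend
# tensor converts to NumPy via `np.asarray`). With the default gain of
# 1.0, a square sample Q satisfies Q^T Q = I:
#
# >>> import numpy as np
# >>> q = np.asarray(Orthogonal(seed=0)(shape=(4, 4)))
# >>> bool(np.allclose(q.T @ q, np.eye(4), atol=1e-5))
# True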
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/random_initializers_test.py | keras/src/initializers/random_initializers_test.py | import numpy as np
from conftest import skip_if_backend
from keras.src import backend
from keras.src import initializers
from keras.src import random
from keras.src import testing
from keras.src import utils
class RandomInitializersTest(testing.TestCase):
def test_random_normal(self):
utils.set_random_seed(1337)
shape = (25, 20)
mean = 0.0
stddev = 1.0
seed = 1234
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=seed
)
values = initializer(shape=shape)
self.assertEqual(initializer.mean, mean)
self.assertEqual(initializer.stddev, stddev)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)), stddev, atol=1e-1
)
self.run_class_serialization_test(initializer)
# Test that a fixed seed yields the same results each call.
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=1337
)
values = initializer(shape=shape)
next_values = initializer(shape=shape)
self.assertAllClose(values, next_values)
# Test that a SeedGenerator yields different results each call.
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=backend.random.SeedGenerator(1337)
)
values = initializer(shape=shape)
next_values = initializer(shape=shape)
self.assertNotAllClose(values, next_values)
        # Test serialization with SeedGenerator
        initializer = initializers.RandomNormal(
            mean=mean, stddev=stddev, seed=backend.random.SeedGenerator(1337)
        )
        self.run_class_serialization_test(initializer)
# Test that unseeded generator gets different results after cloning
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=None
)
values = initializer(shape=shape)
cloned_initializer = initializers.RandomNormal.from_config(
initializer.get_config()
)
new_values = cloned_initializer(shape=shape)
self.assertNotAllClose(values, new_values)
# Test that seeded generator gets same results after cloning
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=1337
)
values = initializer(shape=shape)
cloned_initializer = initializers.RandomNormal.from_config(
initializer.get_config()
)
new_values = cloned_initializer(shape=shape)
self.assertAllClose(values, new_values)
def test_random_uniform(self):
shape = (5, 5)
minval = -1.0
maxval = 1.0
seed = 1234
initializer = initializers.RandomUniform(
minval=minval, maxval=maxval, seed=seed
)
values = initializer(shape=shape)
self.assertEqual(initializer.minval, minval)
self.assertEqual(initializer.maxval, maxval)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
values = backend.convert_to_numpy(values)
self.assertGreaterEqual(np.min(values), minval)
self.assertLess(np.max(values), maxval)
self.run_class_serialization_test(initializer)
def test_variance_scaling(self):
utils.set_random_seed(1337)
shape = (25, 20)
scale = 2.0
seed = 1234
initializer = initializers.VarianceScaling(
scale=scale, seed=seed, mode="fan_in"
)
values = initializer(shape=shape)
self.assertEqual(initializer.scale, scale)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)),
np.sqrt(scale / 25),
atol=1e-1,
)
self.run_class_serialization_test(initializer)
initializer = initializers.VarianceScaling(
scale=scale, seed=seed, mode="fan_out"
)
values = initializer(shape=shape)
self.assertEqual(initializer.scale, scale)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)),
np.sqrt(scale / 20),
atol=1e-1,
)
self.run_class_serialization_test(initializer)
@skip_if_backend("openvino", "openvino backend does not support `qr`")
def test_orthogonal(self):
shape = (5, 5)
gain = 2.0
seed = 1234
initializer = initializers.Orthogonal(gain=gain, seed=seed)
values = initializer(shape=shape)
self.assertEqual(initializer.seed, seed)
self.assertEqual(initializer.gain, gain)
self.assertEqual(values.shape, shape)
array = backend.convert_to_numpy(values)
        # Check that each column has norm equal to `gain`.
for column in array.T:
self.assertAlmostEqual(np.linalg.norm(column), gain * 1.0)
        # Check that each column is orthogonal to every other column.
for i in range(array.shape[-1]):
for j in range(i + 1, array.shape[-1]):
self.assertAlmostEqual(
np.dot(array[..., i], array[..., j]), 0.0
)
self.run_class_serialization_test(initializer)
# Test compatible class_name
initializer = initializers.get("OrthogonalInitializer")
self.assertIsInstance(initializer, initializers.Orthogonal)
def test_get_method(self):
obj = initializers.get("glorot_normal")
        self.assertIsInstance(obj, initializers.GlorotNormal)
obj = initializers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
initializers.get("typo")
@skip_if_backend(
"openvino", "openvino backend does not support `uniform` with None seed"
)
def test_get_method_with_tensor(self):
shape = (5, 5)
# Test backend tensor
tensor = random.uniform(shape=shape)
initializer = initializers.get(tensor)
values = initializer(shape=shape)
self.assertAllClose(values, tensor)
# Test numpy array
tensor = np.random.uniform(size=shape).astype("float32")
initializer = initializers.get(tensor)
values = initializer(shape=shape)
self.assertAllClose(values, tensor)
# Test bad `shape` argument
with self.assertRaisesRegex(ValueError, r"Expected `shape` to be"):
initializer(shape=(10, 10))
def test_variance_scaling_invalid_scale(self):
seed = 1234
with self.assertRaisesRegex(
ValueError, "Argument `scale` must be positive float."
):
initializers.VarianceScaling(scale=-1.0, seed=seed, mode="fan_in")
def test_variance_scaling_invalid_mode(self):
scale = 2.0
seed = 1234
with self.assertRaisesRegex(ValueError, "Invalid `mode` argument:"):
initializers.VarianceScaling(
scale=scale, seed=seed, mode="invalid_mode"
)
def test_variance_scaling_invalid_distribution(self):
scale = 2.0
seed = 1234
with self.assertRaisesRegex(
ValueError, "Invalid `distribution` argument:"
):
initializers.VarianceScaling(
scale=scale,
seed=seed,
mode="fan_in",
distribution="invalid_dist",
)
def test_serialization_with_seed_generator(self):
seed = random.SeedGenerator()
initializer = initializers.Orthogonal(seed=seed)
self.run_class_serialization_test(initializer)
seed = random.SeedGenerator()
initializer = initializers.VarianceScaling(seed=seed)
self.run_class_serialization_test(initializer)
seed = random.SeedGenerator()
initializer = initializers.RandomUniform(seed=seed)
self.run_class_serialization_test(initializer)
seed = random.SeedGenerator()
initializer = initializers.RandomNormal(seed=seed)
self.run_class_serialization_test(initializer)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/__init__.py | keras/src/initializers/__init__.py | import inspect
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import STFT
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import Orthogonal
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
STFT,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
Orthogonal,
RandomNormal,
RandomUniform,
TruncatedNormal,
VarianceScaling,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"IdentityInitializer": Identity, # For compatibility
"normal": RandomNormal,
"one": Ones,
"STFTInitializer": STFT, # For compatibility
"OrthogonalInitializer": Orthogonal, # For compatibility
"uniform": RandomUniform,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
    The `identifier` may be the string name of an initializer function or
    class (case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.get(identifier)
<...keras.initializers.initializers.Ones...>
    You can also specify the `config` of the initializer to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.get(cfg)
<...keras.initializers.initializers.Ones...>
    If the `identifier` is a class, this method returns a new instance of the
    class via its constructor.
You may also pass a callable function with a signature that includes `shape`
and `dtype=None` as an identifier.
>>> fn = lambda shape, dtype=None: ops.ones(shape, dtype)
>>> keras.initializers.get(fn)
<function <lambda> at ...>
Alternatively, you can pass a backend tensor or numpy array as the
`identifier` to define the initializer values directly. Note that when
calling the initializer, the specified `shape` argument must be the same as
the shape of the tensor.
>>> tensor = ops.ones(shape=(5, 5))
>>> keras.initializers.get(tensor)
<function get.<locals>.initialize_fn at ...>
Args:
identifier: A string, dict, callable function, or tensor specifying
the initializer. If a string, it should be the name of an
initializer. If a dict, it should contain the configuration of an
initializer. Callable functions or predefined tensors are also
accepted.
Returns:
        Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
elif ops.is_tensor(identifier) or isinstance(
identifier, (np.generic, np.ndarray)
):
def initialize_fn(shape, dtype=None):
dtype = backend.standardize_dtype(dtype)
if backend.standardize_shape(shape) != backend.standardize_shape(
identifier.shape
):
raise ValueError(
f"Expected `shape` to be {identifier.shape} for direct "
f"tensor as initializer. Received shape={shape}"
)
return ops.cast(identifier, dtype)
obj = initialize_fn
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
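# Editor's note: minimal round-trip sketch for `serialize`/`deserialize`
# and the snake_case aliases registered above (illustrative only):
#
# >>> config = serialize(RandomNormal(stddev=0.5))
# >>> initializer = deserialize(config)
# >>> isinstance(initializer, RandomNormal)
# True
# >>> type(get("he_uniform")) is HeUniform
# True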
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/constant_initializers.py | keras/src/initializers/constant_initializers.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import standardize_dtype
from keras.src.initializers.initializer import Initializer
from keras.src.saving import serialization_lib
from keras.src.utils.module_utils import scipy
@keras_export(["keras.initializers.Constant", "keras.initializers.constant"])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = Constant(10.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Constant(10.)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
"""
def __init__(self, value=0.0):
self.value = value
def __call__(self, shape, dtype=None):
dtype = standardize_dtype(dtype)
return ops.cast(self.value, dtype=dtype) * ops.ones(
shape=shape, dtype=dtype
)
def get_config(self):
return {"value": serialization_lib.serialize_keras_object(self.value)}
@classmethod
def from_config(cls, config):
value = serialization_lib.deserialize_keras_object(config["value"])
return cls(value)
@keras_export(["keras.initializers.Zeros", "keras.initializers.zeros"])
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
Examples:
>>> # Standalone usage:
>>> initializer = Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Zeros()
>>> layer = Dense(units=3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
                is used, which defaults to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
dtype = standardize_dtype(dtype)
return ops.zeros(shape, dtype=dtype)
@keras_export(["keras.initializers.Ones", "keras.initializers.ones"])
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
Also available via the shortcut function `ones`.
Examples:
>>> # Standalone usage:
>>> initializer = Ones()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Ones()
>>> layer = Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
                is used, which defaults to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
dtype = standardize_dtype(dtype)
return ops.ones(shape, dtype=dtype)
@keras_export(
[
"keras.initializers.Identity",
"keras.initializers.identity",
"keras.initializers.IdentityInitializer",
]
)
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only usable for generating 2D matrices.
Examples:
>>> # Standalone usage:
>>> initializer = Identity()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Identity()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.0):
self.gain = gain
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
                is used, which defaults to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
if len(shape) != 2:
raise ValueError(
"Identity matrix initializer can only be used for 2D matrices. "
f"Received: shape={shape} of rank {len(shape)}."
)
dtype = standardize_dtype(dtype)
return self.gain * ops.eye(*shape, dtype=dtype)
@keras_export(
[
"keras.initializers.STFT",
"keras.initializers.stft",
"keras.initializers.STFTInitializer",
]
)
class STFT(Initializer):
"""Initializer of Conv kernels for Short-term Fourier Transformation (STFT).
    Since the formula involves complex numbers, this class computes either the
    real or the imaginary component of the final output.
Additionally, this initializer supports windowing functions across the time
dimension as commonly used in STFT. Windowing functions from the module
`scipy.signal.windows` are supported, including the common `hann` and
`hamming` windowing functions. This layer supports periodic windows and
scaling-based normalization.
This is primarily intended for use in the `STFTSpectrogram` layer.
Examples:
>>> # Standalone usage:
    >>> initializer = STFT("real", "hann", "density", False)
>>> values = initializer(shape=(128, 1, 513))
Args:
side: String, `"real"` or `"imag"` deciding if the kernel will compute
the real side or the imaginary side of the output. Defaults to
`"real"`.
window: String for the name of the windowing function in the
`scipy.signal.windows` module, or array_like for the window values,
or `None` for no windowing.
        scaling: String, `"density"` for L2 normalization of the window or
            `"spectrum"` for L1 normalization, or `None` for no scaling.
periodic: Boolean, if True, the window function will be treated as
periodic. Defaults to `False`.
"""
def __init__(
self, side="real", window="hann", scaling="density", periodic=False
):
if side not in ["real", "imag"]:
raise ValueError(f"side should be 'real' or 'imag', not {side}")
if isinstance(window, str):
# throws an exception for invalid window function
scipy.signal.get_window(window, 1)
if scaling is not None and scaling not in ["density", "spectrum"]:
raise ValueError(
"Scaling is invalid, it must be `None`, 'density' "
f"or 'spectrum'. Received scaling={scaling}"
)
self.side = side
self.window = window
self.scaling = scaling
self.periodic = periodic
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
The shape is assumed to be `(T, 1, F // 2 + 1)`, where `T` is the size
of the given window, and `F` is the number of frequency bands. Only half
        the frequency bands are used, which is common practice in STFT,
        because the second half is the conjugate of the first half in
        reversed order.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
                is used, which defaults to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
dtype = standardize_dtype(dtype)
frame_length, input_channels, fft_length = shape
win = None
scaling = 1
if self.window is not None:
win = self.window
if isinstance(win, str):
# Using SciPy since it provides more windowing functions,
# easier to be compatible with multiple backends.
win = scipy.signal.get_window(win, frame_length, self.periodic)
win = ops.convert_to_tensor(win, dtype=dtype)
if len(win.shape) != 1 or win.shape[-1] != frame_length:
raise ValueError(
"The shape of `window` must be equal to [frame_length]."
f"Received: window shape={win.shape}"
)
win = ops.reshape(win, [frame_length, 1, 1])
if self.scaling == "density":
scaling = ops.sqrt(ops.sum(ops.square(win)))
elif self.scaling == "spectrum":
scaling = ops.sum(ops.abs(win))
_fft_length = (fft_length - 1) * 2
freq = ops.divide(
ops.reshape(
ops.arange(fft_length, dtype=dtype), (1, 1, fft_length)
),
_fft_length,
)
time = ops.reshape(
ops.arange(frame_length, dtype=dtype), (frame_length, 1, 1)
)
args = ops.multiply(ops.multiply(-2, time), freq) * ops.arccos(
ops.cast(-1, dtype)
)
if self.side == "real":
kernel = ops.cast(ops.cos(args), dtype)
else:
kernel = ops.cast(ops.sin(args), dtype)
if win is not None:
kernel = ops.divide(ops.multiply(kernel, win), scaling)
return kernel
def get_config(self):
return {
"side": self.side,
"window": self.window,
"periodic": self.periodic,
"scaling": self.scaling,
}
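# Editor's note: shape sketch for the STFT kernel (illustrative). A
# 256-sample frame with a 1024-point FFT has 1024 // 2 + 1 = 513 one-sided
# frequency bins, so the kernel shape is (frame_length, 1, fft_length):
#
# >>> kernel = STFT("real", "hann")(shape=(256, 1, 513))
# >>> tuple(kernel.shape)
# (256, 1, 513)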
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/initializer.py | keras/src/initializers/initializer.py | from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config()` and the class
method `from_config` in order to support serialization, just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
    Note that we don't have to implement `from_config()` in the example above
    since the constructor arguments of the class and the keys in the config
    returned by `get_config()` are the same. In this case, the default
    `from_config()` works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
def clone(self):
return self.__class__.from_config(self.get_config())
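# Editor's note: `clone()` rebuilds an initializer from its own config
# (sketch; `ExampleRandomNormal` is the hypothetical subclass from the
# class docstring above):
#
# >>> initializer = ExampleRandomNormal(mean=0.0, stddev=0.05)
# >>> clone = initializer.clone()
# >>> (clone is initializer, clone.get_config() == initializer.get_config())
# (False, True)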
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/initializers/constant_initializers_test.py | keras/src/initializers/constant_initializers_test.py | import numpy as np
import scipy.signal
from conftest import skip_if_backend
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_constant_initializer_array_value(self):
shape = (3, 3)
constant_value = np.random.random((3, 3))
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
@skip_if_backend("openvino", "openvino backend does not support `eye`")
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
# Test compatible class_name
initializer = initializers.get("IdentityInitializer")
self.assertIsInstance(initializer, initializers.Identity)
@skip_if_backend("openvino", "openvino backend does not support `arange`")
def test_stft_initializer(self):
shape = (256, 1, 513)
time_range = np.arange(256).reshape((-1, 1, 1))
freq_range = (np.arange(513) / 1024.0).reshape((1, 1, -1))
pi = np.arccos(np.float32(-1))
args = -2 * pi * time_range * freq_range
tol_kwargs = {"atol": 1e-4, "rtol": 1e-6}
initializer = initializers.STFT("real", None)
values = backend.convert_to_numpy(initializer(shape))
self.assertAllClose(np.cos(args), values, atol=1e-4)
self.run_class_serialization_test(initializer)
initializer = initializers.STFT(
"real",
"hamming",
None,
True,
)
window = scipy.signal.windows.get_window("hamming", 256, True)
window = window.astype("float32").reshape((-1, 1, 1))
values = backend.convert_to_numpy(initializer(shape, "float32"))
self.assertAllClose(np.cos(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFT(
"imag",
"tukey",
"density",
False,
)
window = scipy.signal.windows.get_window("tukey", 256, False)
window = window.astype("float32").reshape((-1, 1, 1))
window = window / np.sqrt(np.sum(window**2))
values = backend.convert_to_numpy(initializer(shape, "float32"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFT(
"imag",
list(range(1, 257)),
"spectrum",
)
window = np.arange(1, 257)
window = window.astype("float32").reshape((-1, 1, 1))
window = window / np.sum(window)
values = backend.convert_to_numpy(initializer(shape, "float32"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
with self.assertRaises(ValueError):
initializers.STFT("imaginary")
with self.assertRaises(ValueError):
initializers.STFT("real", scaling="l2")
with self.assertRaises(ValueError):
initializers.STFT("real", window="unknown")
# Test compatible class_name
initializer = initializers.get("STFTInitializer")
self.assertIsInstance(initializer, initializers.STFT)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distillation/distiller_test.py | keras/src/distillation/distiller_test.py | import json
import os
import numpy as np
import pytest
import keras
from keras.src.distillation.distillation_loss import LogitsDistillation
from keras.src.distillation.distiller import Distiller
from keras.src.testing import TestCase
class SimpleTeacher(keras.Model):
"""Simple teacher model for testing."""
def __init__(self, vocab_size=10, hidden_dim=32):
super().__init__()
self.dense1 = keras.layers.Dense(hidden_dim, activation="relu")
self.dense2 = keras.layers.Dense(vocab_size)
def call(self, inputs, training=None):
x = self.dense1(inputs)
return self.dense2(x)
class SimpleStudent(keras.Model):
"""Simple student model for testing."""
def __init__(self, vocab_size=10, hidden_dim=16):
super().__init__()
self.dense1 = keras.layers.Dense(hidden_dim, activation="relu")
self.dense2 = keras.layers.Dense(vocab_size)
def call(self, inputs, training=None):
x = self.dense1(inputs)
return self.dense2(x)
@pytest.mark.requires_trainable_backend
class TestDistiller(TestCase):
"""Essential test cases for the Distiller class."""
def setUp(self):
"""Set up test fixtures."""
super().setUp()
# Create test data
self.x = np.random.random((20, 5)).astype(np.float32)
self.y = np.random.randint(0, 10, (20,)).astype(np.int32)
# Create teacher and student models
self.teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
self.student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models
dummy_input = self.x[:2]
self.teacher(dummy_input)
self.student(dummy_input)
        # Create distillation loss
self.distillation_loss = LogitsDistillation(temperature=2.0)
# Create distiller
self.distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
self.distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
def test_distiller_initialization(self):
"""Test Distiller initialization."""
# Check that teacher is frozen
self.assertFalse(self.teacher.trainable)
# Check that student is trainable
self.assertTrue(self.student.trainable)
# Check student_loss_weight
self.assertEqual(self.distiller.student_loss_weight, 0.5)
        # Check distillation losses (should be a list with one loss)
self.assertIsInstance(self.distiller.distillation_losses, list)
self.assertEqual(len(self.distiller.distillation_losses), 1)
self.assertIsInstance(
self.distiller.distillation_losses[0], LogitsDistillation
)
        # Check that the distillation loss has the correct temperature
self.assertEqual(self.distiller.distillation_losses[0].temperature, 2.0)
# Check that model is compiled
self.assertIsNotNone(self.distiller.optimizer)
# Check if the model has been compiled (different backends may handle
# this differently)
self.assertTrue(
hasattr(self.distiller, "_compile_config")
or hasattr(self.distiller, "compiled_loss"),
"Model should be compiled",
)
def test_distiller_call(self):
"""Test Distiller call method (inference)."""
# Call should return student outputs
outputs = self.distiller(self.x)
# Check output shape
expected_shape = (20, 10) # batch_size, vocab_size
self.assertEqual(outputs.shape, expected_shape)
# Check that outputs are from student, not teacher
student_outputs = self.student(self.x)
self.assertAllClose(outputs, student_outputs)
def test_teacher_freezing(self):
"""Test that teacher is properly frozen."""
# Teacher should be frozen
self.assertFalse(self.teacher.trainable)
# Student should be trainable
self.assertTrue(self.student.trainable)
# Create a new teacher that is trainable and verify it gets frozen
new_teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
self.assertTrue(new_teacher.trainable) # Should be trainable initially
# Create distiller - should freeze the teacher
Distiller(
teacher=new_teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Teacher should now be frozen
self.assertFalse(new_teacher.trainable)
def test_model_compatibility_validation(self):
"""Test model compatibility validation."""
# Test with non-Keras objects
with self.assertRaises(ValueError):
Distiller(
teacher="not_a_model",
student=self.student,
distillation_losses=self.distillation_loss,
)
with self.assertRaises(ValueError):
Distiller(
teacher=self.teacher,
student="not_a_model",
distillation_losses=self.distillation_loss,
)
def test_multi_distillation_loss_functionality(self):
"""Test multi-distillation_loss functionality."""
# Create multiple distillation_loss
distillation_loss = [
LogitsDistillation(temperature=3.0),
LogitsDistillation(temperature=2.0),
]
distillation_loss_weights = [0.7, 0.3]
        # Create distiller with multiple distillation losses
distiller = Distiller(
teacher=self.teacher,
student=self.student,
            distillation_losses=distillation_losses,
distillation_loss_weights=distillation_loss_weights,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
        # Test that distillation losses are stored correctly
self.assertEqual(len(distiller.distillation_losses), 2)
self.assertEqual(distiller.distillation_loss_weights, [0.7, 0.3])
# Test training
x = np.random.random((10, 5)).astype(np.float32)
y = np.random.randint(0, 10, (10,))
history = distiller.fit(x, y, epochs=1, verbose=0)
# Check metrics
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
def test_multi_distillation_loss_validation(self):
"""Test multi-distillation_loss validation."""
distillation_loss = [
LogitsDistillation(temperature=3.0),
LogitsDistillation(temperature=2.0),
]
# Test that validation passes for valid configurations
distiller = Distiller(
teacher=self.teacher,
student=self.student,
            distillation_losses=distillation_losses,
student_loss_weight=0.5,
)
self.assertEqual(len(distiller.distillation_losses), 2)
        # Test a distillation_loss_weights list of the wrong length
with self.assertRaises(ValueError):
Distiller(
teacher=self.teacher,
student=self.student,
                distillation_losses=distillation_losses,
distillation_loss_weights=[1.0], # Wrong length
student_loss_weight=0.5,
)
def test_student_loss_weighting(self):
"""Test student loss weighting functionality."""
# Test with student_loss_weight = 0.0 (only distillation loss)
distiller_0 = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.0,
)
# Test with student_loss_weight = 1.0 (only student loss)
distiller_1 = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=1.0,
)
# Compile both distillers
distiller_0.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
distiller_1.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test that they can be used for training without errors
small_x = self.x[:5]
small_y = self.y[:5]
# Both should train without errors
history_0 = distiller_0.fit(small_x, small_y, epochs=1, verbose=0)
history_1 = distiller_1.fit(small_x, small_y, epochs=1, verbose=0)
# Check that training completed
self.assertIn("total_loss", history_0.history)
self.assertIn("total_loss", history_1.history)
def test_full_training_workflow(self):
"""Test complete training workflow with model.fit() - MOST IMPORTANT."""
# Create larger dataset for training
np.random.seed(42)
x_train = np.random.random((100, 5)).astype(np.float32)
y_train = np.random.randint(0, 10, (100,)).astype(np.int32)
x_val = np.random.random((20, 5)).astype(np.float32)
y_val = np.random.randint(0, 10, (20,)).astype(np.int32)
# Create fresh models for training
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_train[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Train the model
history = distiller.fit(
x_train,
y_train,
validation_data=(x_val, y_val),
epochs=3,
batch_size=16,
verbose=0,
)
# Check that training completed
self.assertIn("total_loss", history.history)
self.assertIn("val_total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Check that losses are finite
for loss_name in ["total_loss", "student_loss", "distillation_loss"]:
losses = history.history[loss_name]
self.assertGreater(len(losses), 0)
for loss in losses:
self.assertTrue(np.isfinite(loss))
# Check that the model can make predictions
predictions = distiller.predict(x_val[:5], verbose=0)
self.assertEqual(predictions.shape, (5, 10)) # batch_size, vocab_size
# Check that student weights have changed (indicating learning)
initial_weights = [w.numpy().copy() for w in student.trainable_weights]
# Train a bit more
distiller.fit(x_train[:10], y_train[:10], epochs=1, verbose=0)
final_weights = [w.numpy() for w in student.trainable_weights]
# At least some weights should have changed
weights_changed = any(
not np.allclose(initial, final, atol=1e-6)
for initial, final in zip(initial_weights, final_weights)
)
self.assertTrue(
weights_changed, "Student weights should change during training"
)
def test_evaluation_workflow(self):
"""Test evaluation workflow with model.evaluate()."""
# Create dataset
np.random.seed(42)
x_test = np.random.random((30, 5)).astype(np.float32)
y_test = np.random.randint(0, 10, (30,)).astype(np.int32)
# Create fresh models
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_test[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Train briefly
distiller.fit(x_test[:10], y_test[:10], epochs=1, verbose=0)
# Evaluate the model
results = distiller.evaluate(x_test, y_test, verbose=0)
# Check that evaluation returns expected metrics
self.assertIsInstance(results, list)
self.assertGreater(len(results), 0)
# All results should be finite
for result in results:
self.assertTrue(np.isfinite(result))
def test_prediction_workflow(self):
"""Test prediction workflow with model.predict()."""
# Create dataset
np.random.seed(42)
x_test = np.random.random((20, 5)).astype(np.float32)
# Create fresh models
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_test[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Make predictions
predictions = distiller.predict(x_test, verbose=0)
# Check prediction shape
self.assertEqual(predictions.shape, (20, 10)) # batch_size, vocab_size
# Check that predictions are finite
self.assertTrue(np.all(np.isfinite(predictions)))
# Check predictions sum to reasonable values (not zeros/infinities)
prediction_sums = np.sum(predictions, axis=1)
self.assertTrue(np.all(np.isfinite(prediction_sums)))
def test_distiller_serialization_and_saving(self):
"""Test Distiller serialization, saving, and loading."""
# Use standard Sequential models for serialization testing
teacher = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="teacher_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="teacher_dense_2"
),
keras.layers.Dense(10, name="teacher_output"),
]
)
student = keras.Sequential(
[
keras.layers.Dense(
16, activation="relu", name="student_dense_1"
),
keras.layers.Dense(
8, activation="relu", name="student_dense_2"
),
keras.layers.Dense(10, name="student_output"),
]
)
# Create distiller with single distillation_loss
distillation_loss = LogitsDistillation(
temperature=3.0, loss="kl_divergence"
)
original_distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=distillation_loss,
student_loss_weight=0.7,
)
# Build the models by calling them
x_test = np.random.random((2, 20)).astype(np.float32)
_ = original_distiller(x_test)
# Test get_config
config = original_distiller.get_config()
# Verify all components are in config
required_keys = [
"teacher",
"student",
"distillation_losses",
"distillation_loss_weights",
"student_loss_weight",
]
for key in required_keys:
self.assertIn(key, config, f"Missing key: {key}")
# Test JSON serialization
json_str = json.dumps(config)
self.assertIsInstance(json_str, str)
# Test from_config reconstruction
reconstructed_distiller = Distiller.from_config(config)
# Verify reconstruction
self.assertEqual(reconstructed_distiller.student_loss_weight, 0.7)
self.assertIsInstance(
reconstructed_distiller.distillation_losses[0], LogitsDistillation
)
# Verify distillation_loss parameters
self.assertEqual(
reconstructed_distiller.distillation_losses[0].temperature, 3.0
)
# Test that reconstructed distiller can be used for inference
reconstructed_output = reconstructed_distiller(x_test)
self.assertEqual(reconstructed_output.shape, (2, 10))
# Test model saving and loading (full integration test)
temp_dir = self.get_temp_dir()
model_path = os.path.join(temp_dir, "distiller_model.keras")
# Compile original distiller
original_distiller.compile(
loss="sparse_categorical_crossentropy",
)
# Save the model
original_distiller.save(model_path)
# Load the model
loaded_distiller = keras.models.load_model(model_path)
# Verify loaded model works
loaded_output = loaded_distiller(x_test)
self.assertEqual(loaded_output.shape, (2, 10))
# Verify parameters are preserved
self.assertEqual(loaded_distiller.student_loss_weight, 0.7)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distillation/distiller.py | keras/src/distillation/distiller.py | import keras
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.distillation.distillation_loss import _convert_loss_to_function
from keras.src.models.model import Model
from keras.src.saving import serialization_lib
@keras_export("keras.distillation.Distiller")
class Distiller(Model):
"""Distillation model for transferring knowledge from teacher to student.
Knowledge distillation transfers knowledge from a large, complex model
(teacher) to a smaller, simpler model (student). The student learns
from both ground truth labels and the teacher's predictions, often
achieving better performance than training on labels alone.
Arguments:
teacher: A trained `keras.Model` that serves as the knowledge source.
The teacher model is frozen during distillation.
student: A `keras.Model` to be trained through distillation.
distillation_losses: List of distillation losses to apply. Can be a
single distillation loss or a list of distillation losses like
`keras.distillation.LogitsDistillation`,
`keras.distillation.FeatureDistillation`, or custom distillation
losses.
distillation_loss_weights: List of weights for each distillation loss.
Must have the same length as `distillation_losses`. If `None`,
equal weights are used.
student_loss_weight: Weight for the student's supervised loss component.
Must be between 0 and 1. Defaults to 0.5.
name: Name for the distiller model. Defaults to `"distiller"`.
**kwargs: Additional keyword arguments passed to the parent `Model`
class.
Attributes:
student: The student model being trained. Access this to get the trained
student model for independent use after distillation training.
teacher: The teacher model providing knowledge. This model is frozen
during training.
Examples:
```python
# Basic distillation with KerasHub models
import keras_hub as hub
teacher = hub.models.CausalLM.from_preset("gemma_2b_en")
student = hub.models.CausalLM.from_preset(
"gemma_1.1_2b_en", load_weights=False
)
# Single distillation loss
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=LogitsDistillation(temperature=3.0),
)
# Compile the distiller (like any Keras model)
distiller.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
# Train the distiller
distiller.fit(x_train, y_train, epochs=10)
# Access the trained student model
trained_student = distiller.student
# Multiple distillation losses
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=[
LogitsDistillation(temperature=3.0),
FeatureDistillation(
teacher_layer_name="dense_1",
student_layer_name="dense_1"
)
],
distillation_loss_weights=[1.0, 0.5],
)
# Compile with custom settings
distiller.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
```
"""
def __init__(
self,
teacher,
student,
distillation_losses,
distillation_loss_weights=None,
student_loss_weight=0.5,
name="distiller",
**kwargs,
):
super().__init__(name=name, **kwargs)
# Validate inputs
self._validate_models(teacher, student)
# Store configuration
self.teacher = teacher
self.student = student
# Validate student_loss_weight
if not isinstance(student_loss_weight, (int, float)):
raise ValueError(
f"student_loss_weight must be a number, got "
f"{type(student_loss_weight)}"
)
if student_loss_weight < 0.0 or student_loss_weight > 1.0:
raise ValueError(
f"student_loss_weight must be between 0.0 and 1.0, "
f"got {student_loss_weight}"
)
self.student_loss_weight = student_loss_weight
# Handle distillation losses configuration
if distillation_losses is None:
raise ValueError(
"'distillation_losses' cannot be `None`. Provide a "
"distillation loss (e.g., LogitsDistillation or "
"FeatureDistillation) or a list of distillation losses."
)
# Convert single distillation loss to list for uniform handling
if not isinstance(distillation_losses, (list, tuple)):
self.distillation_losses = [distillation_losses]
self.distillation_loss_weights = [1.0]
else:
self.distillation_losses = distillation_losses
# Set default weights if not provided
if distillation_loss_weights is None:
self.distillation_loss_weights = [1.0] * len(
distillation_losses
)
else:
if len(distillation_loss_weights) != len(distillation_losses):
raise ValueError(
f"Number of distillation_loss_weights "
f"({len(distillation_loss_weights)}) must match "
f"number of distillation_losses "
f"({len(distillation_losses)})"
)
self.distillation_loss_weights = distillation_loss_weights
# Validate distillation loss compatibility and create extractors
for distillation_loss in self.distillation_losses:
self._validate_distillation_loss_compatibility(
teacher, student, distillation_loss
)
self._create_multi_feature_extractors()
# Freeze teacher model
self.teacher.trainable = False
# Initialize loss tracking metrics
self.student_loss_tracker = keras.metrics.Mean(name="student_loss")
self.distillation_loss_tracker = keras.metrics.Mean(
name="distillation_loss"
)
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
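    # Editor's note: the three trackers above surface as `total_loss`,
    # `student_loss`, and `distillation_loss` in the `fit()` history (the
    # key names are taken from distiller_test.py; the exact key set also
    # depends on compiled metrics). Sketch:
    #
    # >>> history = distiller.fit(x_train, y_train, epochs=1, verbose=0)
    # >>> [k in history.history for k in
    # ...     ("total_loss", "student_loss", "distillation_loss")]
    # [True, True, True]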
def _validate_models(self, teacher, student):
"""Validate that teacher and student models are compatible."""
if not isinstance(teacher, keras.Model):
raise ValueError(
f"Teacher must be a keras.Model, got {type(teacher)}"
)
if not isinstance(student, keras.Model):
raise ValueError(
f"Student must be a keras.Model, got {type(student)}"
)
self._validate_input_compatibility(teacher, student)
self._validate_output_compatibility(teacher, student)
self._validate_dtype_compatibility(teacher, student)
def _assert_shapes_are_compatible(self, shape1, shape2, context):
"""Assert that two shapes are compatible."""
if len(shape1) != len(shape2):
raise ValueError(
f"Teacher and student {context} shapes have different "
f"dimensions. Teacher: {shape1}, Student: {shape2}."
)
for dim1, dim2 in zip(shape1, shape2):
if dim1 is not None and dim2 is not None and dim1 != dim2:
raise ValueError(
f"Teacher and student {context} shapes are incompatible. "
f"Teacher: {shape1}, Student: {shape2}. "
f"All dimensions must match."
)
def _assert_same_dtype(self, teacher_dtype, student_dtype, context):
"""Assert that teacher and student dtypes are the same."""
if teacher_dtype != student_dtype:
raise ValueError(
f"Teacher and student {context} dtypes must match. "
f"Teacher: {teacher_dtype}, Student: {student_dtype}."
)
def _validate_input_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible input shapes."""
if not hasattr(teacher, "inputs") or not hasattr(student, "inputs"):
return
teacher_inputs = getattr(teacher, "inputs")
student_inputs = getattr(student, "inputs")
if teacher_inputs is None or student_inputs is None:
return
tree.map_structure(
lambda ti, si: self._assert_shapes_are_compatible(
ti.shape, si.shape, "input"
),
teacher_inputs,
student_inputs,
)
def _validate_output_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible output shapes."""
if not hasattr(teacher, "outputs") or not hasattr(student, "outputs"):
return
teacher_outputs = getattr(teacher, "outputs")
student_outputs = getattr(student, "outputs")
if teacher_outputs is None or student_outputs is None:
return
tree.map_structure(
lambda to, so: self._assert_shapes_are_compatible(
to.shape, so.shape, "output"
),
teacher_outputs,
student_outputs,
)
def _validate_dtype_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible data types."""
if not hasattr(teacher, "inputs") or not hasattr(student, "inputs"):
return
if teacher.inputs is None or student.inputs is None:
return
tree.map_structure(
lambda ti, si: self._assert_same_dtype(ti.dtype, si.dtype, "input"),
teacher.inputs,
student.inputs,
)
if not hasattr(teacher, "outputs") or not hasattr(student, "outputs"):
return
if teacher.outputs is None or student.outputs is None:
return
tree.map_structure(
lambda to, so: self._assert_same_dtype(
to.dtype, so.dtype, "output"
),
teacher.outputs,
student.outputs,
)
def _validate_distillation_loss_compatibility(
self, teacher, student, distillation_loss
):
"""Validate that the distillation loss is compatible with teacher
and student models."""
distillation_loss.validate_model_compatibility(teacher, student)
def _create_multi_feature_extractors(self):
"""Create feature extractors for efficient multi-layer extraction."""
teacher_layer_names = []
student_layer_names = []
for distillation_loss in self.distillation_losses:
if (
hasattr(distillation_loss, "teacher_layer_name")
and distillation_loss.teacher_layer_name
):
if (
distillation_loss.teacher_layer_name
not in teacher_layer_names
):
teacher_layer_names.append(
distillation_loss.teacher_layer_name
)
if (
hasattr(distillation_loss, "student_layer_name")
and distillation_loss.student_layer_name
):
if (
distillation_loss.student_layer_name
not in student_layer_names
):
student_layer_names.append(
distillation_loss.student_layer_name
)
self._teacher_feature_extractor = self._create_feature_extractor(
self.teacher, teacher_layer_names
)
self._student_feature_extractor = self._create_feature_extractor(
self.student, student_layer_names
)
def _create_feature_extractor(self, model, layer_names):
"""Create a feature extractor for a model.
Arguments:
model: The model to create an extractor for.
layer_names: List of layer names to extract features from.
Returns:
Feature extractor model or `None` if no layer names provided.
Raises:
ValueError: If model has no symbolic inputs/outputs.
"""
if not layer_names:
return None
if not hasattr(model, "inputs") or model.inputs is None:
raise ValueError(
f"Cannot create feature extractor for {model.name}. "
f"The model has no symbolic inputs attribute."
)
if isinstance(model, keras.Sequential):
final_output = model.layers[-1].output
else:
final_output = model.output
outputs = {"final_output": final_output}
for layer_name in layer_names:
layer = model.get_layer(name=layer_name)
outputs[layer_name] = layer.output
return keras.Model(
inputs=model.inputs,
outputs=outputs,
name=f"{model.name}_multi_feature_extractor",
)
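    # Editor's note: sketch of the extractor's output structure (the layer
    # name "dense_1" is an assumption for illustration). A single forward
    # pass yields the final output plus each requested activation:
    #
    # >>> extractor = self._create_feature_extractor(model, ["dense_1"])
    # >>> sorted(extractor(x).keys())
    # ['dense_1', 'final_output']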
def _extract_all_teacher_features(self, x):
"""Extract all teacher features in a single forward pass."""
if self._teacher_feature_extractor is not None:
return self._teacher_feature_extractor(x, training=False)
else:
return {"final_output": self.teacher(x, training=False)}
def _extract_all_student_features(self, x, y_pred):
"""Extract all student features in a single forward pass."""
if self._student_feature_extractor is not None:
return self._student_feature_extractor(x, training=True)
else:
return {"final_output": y_pred}
def _get_distillation_loss_features(
self, distillation_loss, all_features, is_teacher
):
"""Get the specific features needed by a distillation loss."""
if is_teacher:
layer_name = distillation_loss.teacher_layer_name or "final_output"
else:
layer_name = distillation_loss.student_layer_name or "final_output"
if layer_name not in all_features:
raise ValueError(
f"Layer '{layer_name}' not found in extracted features. "
f"Available: {list(all_features.keys())}"
)
return all_features[layer_name]
def compile(self, optimizer="adam", loss=None, metrics=None, **kwargs):
"""Compile the distiller with proper integration.
Arguments:
optimizer: Optimizer for training the student model.
loss: Student loss function for the student's supervised learning.
Can be a string identifier or a loss function instance.
metrics: Additional metrics to track during training.
**kwargs: Additional arguments passed to parent compile.
"""
if loss is None:
raise ValueError("'loss' cannot be `None`.")
self._student_loss = tree.map_structure(_convert_loss_to_function, loss)
self._student_loss_for_serialization = loss
if metrics is not None and not isinstance(metrics, (list, tuple)):
raise ValueError(
f"metrics must be a list or tuple, got {type(metrics)}"
)
super().compile(
optimizer=optimizer,
loss=None,
metrics=metrics,
**kwargs,
)
def call(self, inputs, training=None, **kwargs):
"""Forward pass returns student predictions."""
return self.student(inputs, training=training, **kwargs)
def compute_loss(
self, x=None, y=None, y_pred=None, sample_weight=None, training=True
):
"""Compute combined distillation loss.
Arguments:
x: Input data.
y: Target data.
y_pred: Model predictions.
sample_weight: Sample weights (currently unused).
training: Whether the model is in training mode.
Returns:
Combined loss tensor.
"""
# Handle case where y_pred is not provided
if y_pred is None:
y_pred = self(x, training=training)
# Compute student loss
student_loss = 0.0
if self.student_loss_weight > 0.0 and y is not None:
loss_values = tree.map_structure(
lambda l, o, o_pred: l(o, o_pred),
self._student_loss,
y,
y_pred,
)
flat_losses = tree.flatten(loss_values)
student_loss = (
keras.ops.sum(keras.ops.stack(flat_losses))
if len(flat_losses) > 1
else flat_losses[0]
)
# Ensure student_loss is a scalar
if hasattr(student_loss, "shape") and len(student_loss.shape) > 0:
student_loss = keras.ops.mean(student_loss)
# Compute distillation loss
distillation_loss = 0.0
if self.student_loss_weight < 1.0:
teacher_features = self._extract_all_teacher_features(x)
student_features = self._extract_all_student_features(x, y_pred)
# Apply distillation losses using pre-extracted features
for distillation_loss_fn, weight in zip(
self.distillation_losses, self.distillation_loss_weights
):
# Get appropriate outputs/features for this distillation loss
if (
hasattr(distillation_loss_fn, "teacher_layer_name")
and distillation_loss_fn.teacher_layer_name is not None
):
# FeatureDistillation with specific layers
try:
distillation_loss_teacher_output = (
self._get_distillation_loss_features(
distillation_loss_fn,
teacher_features,
is_teacher=True,
)
)
distillation_loss_student_output = (
self._get_distillation_loss_features(
distillation_loss_fn,
student_features,
is_teacher=False,
)
)
except ValueError as e:
# Re-raise with context about which loss failed
raise RuntimeError(
f"Failed to extract features for "
f"{type(distillation_loss_fn).__name__} "
f"targeting teacher layer "
f"'{distillation_loss_fn.teacher_layer_name}' "
f"and student layer "
f"'{distillation_loss_fn.student_layer_name}'. "
f"Original error: {e}"
) from e
else:
# LogitsDistillation or FeatureDistillation (final outputs)
distillation_loss_teacher_output = teacher_features[
"final_output"
]
distillation_loss_student_output = y_pred
# Validate outputs are compatible for this distillation loss
distillation_loss_fn.validate_outputs(
distillation_loss_teacher_output,
distillation_loss_student_output,
)
# Compute loss for this distillation loss
current_distillation_loss = distillation_loss_fn.compute_loss(
distillation_loss_teacher_output,
distillation_loss_student_output,
)
# Validate that distillation loss returns a scalar
if (
hasattr(current_distillation_loss, "shape")
and len(current_distillation_loss.shape) > 0
):
raise ValueError(
f"Distillation loss "
f"{distillation_loss_fn.__class__.__name__} "
f"returned a non-scalar loss with shape "
f"{current_distillation_loss.shape}. "
f"The compute_loss method must return a scalar "
f"tensor."
)
# Apply weight and add to total
distillation_loss = keras.ops.add(
distillation_loss,
keras.ops.multiply(weight, current_distillation_loss),
)
# Combine losses
total_loss = keras.ops.add(
keras.ops.multiply(self.student_loss_weight, student_loss),
keras.ops.multiply(
keras.ops.subtract(1.0, self.student_loss_weight),
distillation_loss,
),
)
# Update metrics
self.student_loss_tracker.update_state(student_loss)
self.distillation_loss_tracker.update_state(distillation_loss)
self.total_loss_tracker.update_state(total_loss)
return total_loss
def reset_metrics(self):
"""Reset all metrics."""
super().reset_metrics()
self.student_loss_tracker.reset_state()
self.distillation_loss_tracker.reset_state()
self.total_loss_tracker.reset_state()
def get_config(self):
"""Get configuration for serialization."""
config = super().get_config()
config.update(
{
"teacher": serialization_lib.serialize_keras_object(
self.teacher
),
"student": serialization_lib.serialize_keras_object(
self.student
),
"distillation_losses": [
serialization_lib.serialize_keras_object(distillation_loss)
for distillation_loss in self.distillation_losses
],
"distillation_loss_weights": self.distillation_loss_weights,
"student_loss_weight": self.student_loss_weight,
}
)
return config
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
# Deserialize objects
config["teacher"] = serialization_lib.deserialize_keras_object(
config["teacher"]
)
config["student"] = serialization_lib.deserialize_keras_object(
config["student"]
)
config["distillation_losses"] = [
serialization_lib.deserialize_keras_object(distillation_loss)
for distillation_loss in config["distillation_losses"]
]
return cls(**config)
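# Illustrative usage sketch: a minimal end-to-end run mirroring the
# pattern exercised in the distillation tests. The model sizes and data
# below are arbitrary assumptions, and training assumes a trainable
# backend (TensorFlow, JAX, or PyTorch).
if __name__ == "__main__":
    import numpy as np

    from keras.src.distillation.distillation_loss import LogitsDistillation

    teacher = keras.Sequential(
        [keras.layers.Dense(16, activation="relu"), keras.layers.Dense(10)]
    )
    student = keras.Sequential(
        [keras.layers.Dense(8, activation="relu"), keras.layers.Dense(10)]
    )
    x = np.random.random((32, 20)).astype("float32")
    y = np.random.randint(0, 10, (32,))
    # Build both models once so they expose symbolic inputs/outputs.
    teacher(x[:2])
    student(x[:2])
    distiller = Distiller(
        teacher=teacher,
        student=student,
        distillation_losses=LogitsDistillation(temperature=3.0),
        student_loss_weight=0.5,
    )
    distiller.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    distiller.fit(x, y, epochs=1, verbose=0)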
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distillation/distillation_loss_test.py | keras/src/distillation/distillation_loss_test.py | import numpy as np
import pytest
import keras
from keras.src.distillation.distillation_loss import FeatureDistillation
from keras.src.distillation.distillation_loss import LogitsDistillation
from keras.src.distillation.distiller import Distiller
from keras.src.testing import TestCase
@pytest.mark.requires_trainable_backend
class TestLogitsDistillation(TestCase):
"""Test cases for LogitsDistillation distillation_loss."""
def test_logits_distillation_basic(self):
"""Test basic logits distillation structure validation."""
# Create dummy logits
teacher_logits = keras.ops.convert_to_tensor(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype="float32"
)
student_logits = keras.ops.convert_to_tensor(
np.array([[2.0, 1.0, 4.0], [3.0, 6.0, 2.0]]), dtype="float32"
)
distillation_loss = LogitsDistillation(temperature=3.0)
distillation_loss.validate_outputs(teacher_logits, student_logits)
incompatible_logits = {"output": teacher_logits}
with self.assertRaises(ValueError):
distillation_loss.validate_outputs(
teacher_logits, incompatible_logits
)
@pytest.mark.requires_trainable_backend
class TestFeatureDistillation(TestCase):
"""Test cases for FeatureDistillation distillation_loss."""
def test_feature_distillation_basic(self):
"""Test basic feature distillation structure validation."""
# Create dummy features
teacher_features = keras.ops.convert_to_tensor(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype="float32"
)
student_features = keras.ops.convert_to_tensor(
np.array([[1.1, 2.1, 3.1], [4.1, 5.1, 6.1]]), dtype="float32"
)
distillation_loss = FeatureDistillation(loss="mse")
distillation_loss.validate_outputs(teacher_features, student_features)
incompatible_features = [teacher_features, teacher_features]
with self.assertRaises(ValueError):
distillation_loss.validate_outputs(
teacher_features, incompatible_features
)
@pytest.mark.requires_trainable_backend
class TestEndToEndDistillation(TestCase):
"""End-to-end distillation tests with real models."""
def setUp(self):
"""Set up models and test data for all tests."""
super().setUp()
# Create teacher model
self.teacher = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="teacher_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="teacher_dense_2"
),
keras.layers.Dense(10, name="teacher_output"),
]
)
# Create student model
self.student = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="student_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="student_dense_2"
),
keras.layers.Dense(10, name="student_output"),
]
)
self.x = np.random.random((32, 20)).astype(np.float32)
self.y = np.random.randint(0, 10, (32,)).astype(np.int32)
self.teacher(self.x[:2])
self.student(self.x[:2])
def test_logits_distillation_end_to_end(self):
"""Test end-to-end logits distillation with real models."""
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=LogitsDistillation(temperature=3.0),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify loss values are reasonable
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(final_loss))
self.assertGreater(final_loss, 0.0)
# Test prediction
predictions = distiller.predict(self.x[:5], verbose=0)
self.assertEqual(predictions.shape, (5, 10))
# Test student model access
student_model = distiller.student
self.assertIsInstance(student_model, keras.Model)
def test_feature_distillation_end_to_end(self):
"""Test end-to-end feature distillation with real models."""
# Create distiller with feature distillation
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify feature extraction worked
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Test that feature extractors have correct outputs
self.assertEqual(
len(distiller._teacher_feature_extractor.outputs), 2
) # final + dense_1
self.assertEqual(
len(distiller._student_feature_extractor.outputs), 2
) # final + dense_1
def test_multi_distillation_loss_distillation_end_to_end(self):
"""Test end-to-end distillation with multiple distillation_loss."""
# Create multiple distillation_loss
distillation_loss = [
LogitsDistillation(temperature=3.0),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_2",
student_layer_name="student_dense_2",
),
]
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
distillation_loss_weights=[1.0, 0.5, 0.3],
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify efficient feature extraction
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Should have 3 outputs: final + dense_1 + dense_2
self.assertEqual(len(distiller._teacher_feature_extractor.outputs), 3)
self.assertEqual(len(distiller._student_feature_extractor.outputs), 3)
# Check that the initial and final losses are finite
initial_loss = history.history["total_loss"][0]
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(initial_loss))
self.assertTrue(np.isfinite(final_loss))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distillation/distillation_loss.py | keras/src/distillation/distillation_loss.py | import keras
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.saving import serialization_lib
from keras.src.utils import tracking
def _convert_loss_to_function(loss_item):
"""Convert a loss string identifier to a loss function.
Arguments:
loss_item: Either a string identifier, a loss function instance,
or `None`.
Returns:
A loss function instance, or `None`.
Raises:
ValueError: If the loss string identifier is unknown.
"""
if loss_item is None:
return None
elif isinstance(loss_item, str):
loss_fn = keras.losses.get(loss_item)
if loss_fn is None:
raise ValueError(f"Unknown loss function: '{loss_item}'.")
return loss_fn
else:
return loss_item
@keras_export("keras.distillation.DistillationLoss")
class DistillationLoss:
"""Base class for distillation loss computation.
Distillation losses define how to compute the distillation loss
between teacher and student outputs. Each loss implements a specific
approach to knowledge transfer, from simple logits matching to feature-based
distillation.
To create custom distillation losses, subclass this class and
override the `compute_loss` method. A minimal illustrative sketch follows
this class definition.
"""
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute distillation loss between teacher and student outputs.
This method should implement the specific distillation logic for
transferring knowledge from teacher to student.
Arguments:
teacher_outputs: Outputs from the teacher model. Can be a single
tensor or a list/tuple of tensors for multi-output models.
student_outputs: Outputs from the student model. Can be a single
tensor or a list/tuple of tensors for multi-output models.
**kwargs: Additional arguments for custom distillation losses.
Returns:
Distillation loss tensor.
"""
raise NotImplementedError("Subclasses must implement compute_loss")
def validate_outputs(self, teacher_outputs, student_outputs):
"""Validate that teacher and student outputs are compatible.
Arguments:
teacher_outputs: Outputs from the teacher model.
student_outputs: Outputs from the student model.
Raises:
ValueError: If outputs are not compatible.
"""
keras.tree.assert_same_structure(teacher_outputs, student_outputs)
def validate_model_compatibility(self, teacher, student):
"""Validate that teacher and student models are compatible.
Arguments:
teacher: The teacher model.
student: The student model.
Raises:
ValueError: If models are not compatible with this distillation
loss.
"""
pass
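# Illustrative sketch of the subclassing contract described above; the
# class name and the cosine formulation are assumptions for illustration,
# not library API.
class _ExampleCosineDistillation(DistillationLoss):
    """Toy loss: mean cosine distance between final teacher/student outputs."""

    def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
        # `keras.losses.cosine_similarity` returns negated cosine similarity
        # (-1 for perfectly aligned vectors), so minimizing its mean aligns
        # student outputs with teacher outputs. Reduce to a scalar, as
        # `Distiller.compute_loss` requires.
        loss = keras.losses.cosine_similarity(
            teacher_outputs, student_outputs, axis=-1
        )
        return keras.ops.mean(loss)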
@keras_export("keras.distillation.FeatureDistillation")
class FeatureDistillation(DistillationLoss):
"""Feature distillation loss.
Feature distillation transfers knowledge from intermediate layers of the
teacher model to corresponding layers of the student model. This approach
helps the student learn better internal representations and often leads
to better performance compared to logits-only distillation.
Arguments:
loss: Loss function to use for feature distillation. Can be:
- String identifier (e.g., 'mse', 'cosine_similarity', 'mae')
- Keras loss instance
- Nested structure of losses matching the layer output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'mse'.
teacher_layer_name: Name of the teacher layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
student_layer_name: Name of the student layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
Examples:
```python
# Basic feature distillation from final outputs
distillation_loss = FeatureDistillation(loss="mse")
# Distill from specific intermediate layers
distillation_loss = FeatureDistillation(
loss="mse",
teacher_layer_name="dense_1",
student_layer_name="dense_1"
)
# Use cosine similarity for different feature sizes
distillation_loss = FeatureDistillation(
loss="cosine_similarity",
teacher_layer_name="conv2d_2",
student_layer_name="conv2d_1"
)
# With custom loss instance
distillation_loss = FeatureDistillation(
loss=keras.losses.MeanAbsoluteError()
)
# For multi-output models
distillation_loss = FeatureDistillation(
loss=["mse", "cosine_similarity"]
)
# For multi-output models, only distill some outputs
distillation_loss = FeatureDistillation(
loss=["mse", None, "cosine_similarity"] # Skip middle output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self, loss="mse", teacher_layer_name=None, student_layer_name=None
):
self.teacher_layer_name = teacher_layer_name
self.student_layer_name = student_layer_name
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError(
"The `loss` argument in `FeatureDistillation` must "
"contain at least one non-`None` value."
)
def validate_model_compatibility(self, teacher, student):
"""Validate that teacher and student models are compatible for feature
distillation."""
if (
self.teacher_layer_name is not None
or self.student_layer_name is not None
):
teacher_is_subclassed = (
not hasattr(teacher, "inputs") or teacher.inputs is None
)
student_is_subclassed = (
not hasattr(student, "inputs") or student.inputs is None
)
if teacher_is_subclassed or student_is_subclassed:
subclassed_models = []
if teacher_is_subclassed:
subclassed_models.append("teacher")
if student_is_subclassed:
subclassed_models.append("student")
models_str = " and ".join(subclassed_models)
raise ValueError(
f"FeatureDistillation with specific layer names requires "
f"Functional or Sequential models. The {models_str} "
f"model(s) appear to be subclassed (no symbolic "
f"inputs/outputs). Either use Functional/Sequential "
f"models, or use FeatureDistillation without layer names "
f"(to distill final outputs only), or use "
f"LogitsDistillation instead."
)
if self.teacher_layer_name is not None:
try:
teacher.get_layer(name=self.teacher_layer_name)
except ValueError as e:
raise ValueError(f"In teacher model: {e}")
if self.student_layer_name is not None:
try:
student.get_layer(name=self.student_layer_name)
except ValueError as e:
raise ValueError(f"In student model: {e}")
def validate_outputs(self, teacher_outputs, student_outputs):
"""Validate that outputs are compatible for feature distillation."""
super().validate_outputs(teacher_outputs, student_outputs)
try:
tree.assert_same_structure(self.loss, teacher_outputs)
except ValueError as e:
raise ValueError(
f"Loss structure mismatch. "
f"Loss structure: {tree.structure(self.loss)}, "
f"Output structure: {tree.structure(teacher_outputs)}. "
f"Error: {e}"
)
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute feature distillation loss using extracted features.
Arguments:
teacher_outputs: Extracted features from teacher layer.
student_outputs: Extracted features from student layer.
**kwargs: Additional arguments (ignored).
Returns:
Scalar distillation loss tensor.
"""
def apply_loss(loss_fn, teacher_features, student_features):
if loss_fn is None:
return 0.0
loss = keras.ops.mean(loss_fn(teacher_features, student_features))
return loss
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_outputs, student_outputs
)
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"loss": keras.losses.serialize(self.loss),
"teacher_layer_name": self.teacher_layer_name,
"student_layer_name": self.student_layer_name,
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
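# Illustrative sketch: `FeatureDistillation.compute_loss` can be called
# directly on matching teacher/student feature tensors and returns a
# scalar. The tensors below are arbitrary.
if __name__ == "__main__":
    teacher_features = keras.ops.ones((2, 4))
    student_features = keras.ops.zeros((2, 4))
    fd = FeatureDistillation(loss="mse")
    fd.validate_outputs(teacher_features, student_features)
    # MSE between all-ones and all-zeros features is 1.0.
    print(float(fd.compute_loss(teacher_features, student_features)))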
@keras_export("keras.distillation.LogitsDistillation")
class LogitsDistillation(DistillationLoss):
"""Distillation loss that transfers knowledge from final model outputs.
This distillation loss applies temperature scaling to the teacher's logits
before computing the loss between teacher and student predictions. It's the
most common approach for knowledge distillation.
Arguments:
temperature: Temperature for softmax scaling. Higher values produce
softer probability distributions that are easier for the student to
learn. Typical values range from 3-5. Defaults to 3.0.
loss: Loss function to use for distillation. Can be:
- String identifier (e.g., 'kl_divergence',
'categorical_crossentropy')
- Keras loss instance
- Nested structure of losses matching the model output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'kl_divergence'.
Examples:
```python
# Basic logits distillation with KL divergence
distillation_loss = LogitsDistillation(temperature=3.0)
# With categorical crossentropy loss
distillation_loss = LogitsDistillation(
temperature=4.0,
loss="categorical_crossentropy"
)
# With custom loss instance
distillation_loss = LogitsDistillation(
temperature=4.0,
loss=keras.losses.CategoricalCrossentropy(from_logits=True)
)
# For multi-output models
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", "categorical_crossentropy"]
)
# For multi-output models, only distill some outputs
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", None] # Skip second output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self,
temperature=3.0,
loss="kl_divergence",
):
self.temperature = temperature
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError("At least one loss must be non-`None`.")
if not isinstance(self.temperature, (int, float)):
raise ValueError(
f"temperature must be a number, got {type(self.temperature)}"
)
if self.temperature <= 0.0:
raise ValueError("temperature must be positive.")
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute distillation loss using the configured loss function.
Arguments:
teacher_outputs: Logits from teacher model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
student_outputs: Logits from student model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
**kwargs: Additional arguments (ignored).
Returns:
Distillation loss tensor.
"""
# Apply temperature scaling using tree.map_structure
teacher_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), teacher_outputs
)
student_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), student_outputs
)
# Apply loss function(s) to corresponding outputs
def apply_loss(loss_fn, teacher_logits, student_logits):
if loss_fn is None:
return 0.0
# Special handling for KL divergence (needs probabilities)
if isinstance(loss_fn, keras.losses.KLDivergence):
teacher_probs = keras.ops.softmax(teacher_logits, axis=-1)
student_probs = keras.ops.softmax(student_logits, axis=-1)
loss = keras.ops.mean(loss_fn(teacher_probs, student_probs))
# Scale by temperature**2 for KL, following Hinton et al. (2015)
return loss * (self.temperature**2)
else:
# For other losses, use logits directly
return keras.ops.mean(loss_fn(teacher_logits, student_logits))
# Apply losses using tree.map_structure
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_scaled, student_scaled
)
# Sum all losses and return scalar
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"temperature": self.temperature,
"loss": serialization_lib.serialize_keras_object(self.loss),
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
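# Illustrative sketch: with the default KL-divergence loss, both sets of
# logits are divided by `temperature` before the loss is applied, and the
# KL term is rescaled by `temperature**2` as implemented above. The logits
# below are arbitrary.
if __name__ == "__main__":
    teacher_logits = keras.ops.convert_to_tensor([[2.0, 1.0, 0.1]])
    student_logits = keras.ops.convert_to_tensor([[1.5, 0.8, 0.2]])
    distillation_loss = LogitsDistillation(temperature=3.0)
    print(
        float(distillation_loss.compute_loss(teacher_logits, student_logits))
    )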
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distillation/__init__.py | keras/src/distillation/__init__.py | """Distillation module for knowledge distillation in Keras."""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/regularizers/regularizers_test.py | keras/src/regularizers/regularizers_test.py | import numpy as np
from keras.src import backend
from keras.src import regularizers
from keras.src import testing
from keras.src.regularizers.regularizers import validate_float_arg
class RegularizersTest(testing.TestCase):
def test_config(self):
reg = regularizers.L1(0.1)
self.run_class_serialization_test(reg)
reg = regularizers.L2(0.1)
self.run_class_serialization_test(reg)
reg = regularizers.L1L2(l1=0.1, l2=0.2)
self.run_class_serialization_test(reg)
reg = regularizers.OrthogonalRegularizer(factor=0.1, mode="rows")
self.run_class_serialization_test(reg)
def test_l1(self):
value = np.random.random((4, 4)).astype(np.float32)
x = backend.Variable(value)
y = regularizers.L1(0.1)(x)
self.assertAllClose(y, 0.1 * np.sum(np.abs(value)))
def test_l2(self):
value = np.random.random((4, 4)).astype(np.float32)
x = backend.Variable(value)
y = regularizers.L2(0.1)(x)
self.assertAllClose(y, 0.1 * np.sum(np.square(value)))
def test_l1_l2(self):
value = np.random.random((4, 4)).astype(np.float32)
x = backend.Variable(value)
y = regularizers.L1L2(l1=0.1, l2=0.2)(x)
self.assertAllClose(
y, 0.1 * np.sum(np.abs(value)) + 0.2 * np.sum(np.square(value))
)
def test_orthogonal_regularizer(self):
value = np.random.random((4, 4)).astype(np.float32)
x = backend.Variable(value)
y = regularizers.OrthogonalRegularizer(factor=0.1, mode="rows")(x)
l2_norm = np.linalg.norm(value, axis=1, keepdims=True)
inputs = value / l2_norm
self.assertAllClose(
y,
0.1
* 0.5
* np.sum(
np.abs(np.dot(inputs, np.transpose(inputs)) * (1.0 - np.eye(4)))
)
/ (4.0 * (4.0 - 1.0) / 2.0),
tpu_atol=1e-4,
tpu_rtol=1e-4,
)
def test_get_method(self):
obj = regularizers.get("l1l2")
self.assertIsInstance(obj, regularizers.L1L2)
obj = regularizers.get("l1")
self.assertIsInstance(obj, regularizers.L1)
obj = regularizers.get("l2")
self.assertIsInstance(obj, regularizers.L2)
obj = regularizers.get("orthogonal_regularizer")
self.assertIsInstance(obj, regularizers.OrthogonalRegularizer)
obj = regularizers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
regularizers.get("typo")
def test_l1l2_get_config(self):
l1 = 0.01
l2 = 0.02
reg = regularizers.L1L2(l1=l1, l2=l2)
config = reg.get_config()
self.assertEqual(config, {"l1": l1, "l2": l2})
reg_from_config = regularizers.L1L2.from_config(config)
config_from_config = reg_from_config.get_config()
self.assertDictEqual(config, config_from_config)
self.assertEqual(reg_from_config.l1, l1)
self.assertEqual(reg_from_config.l2, l2)
def test_orthogonal_regularizer_mode_validation(self):
with self.assertRaises(ValueError) as context:
regularizers.OrthogonalRegularizer(factor=0.01, mode="invalid_mode")
expected_message = (
'Invalid value for argument `mode`. Expected one of {"rows", '
'"columns"}. Received: mode=invalid_mode'
)
self.assertEqual(str(context.exception), expected_message)
def test_orthogonal_regularizer_input_rank_validation(self):
with self.assertRaises(ValueError) as context:
value = np.random.random((4, 4, 4)).astype(np.float32)
x = backend.Variable(value)
regularizers.OrthogonalRegularizer(factor=0.1)(x)
expected_message = (
"Inputs to OrthogonalRegularizer must have rank 2. "
f"Received: inputs.shape={(4, 4, 4)}"
)
self.assertEqual(str(context.exception), expected_message)
def test_orthogonal_regularizer_get_config(self):
factor = 0.01
mode = "columns"
regularizer = regularizers.OrthogonalRegularizer(
factor=factor, mode=mode
)
config = regularizer.get_config()
self.assertAlmostEqual(config["factor"], factor, 7)
self.assertEqual(config["mode"], mode)
reg_from_config = regularizers.OrthogonalRegularizer.from_config(config)
config_from_config = reg_from_config.get_config()
self.assertAlmostEqual(config_from_config["factor"], factor, 7)
self.assertEqual(config_from_config["mode"], mode)
class ValidateFloatArgTest(testing.TestCase):
def test_validate_float_with_valid_args(self):
self.assertEqual(validate_float_arg(1, "test"), 1.0)
self.assertEqual(validate_float_arg(1.0, "test"), 1.0)
def test_validate_float_with_invalid_types(self):
with self.assertRaisesRegex(
ValueError, "expected a non-negative float"
):
validate_float_arg("not_a_number", "test")
def test_validate_float_with_nan(self):
with self.assertRaisesRegex(
ValueError, "expected a non-negative float"
):
validate_float_arg(float("nan"), "test")
def test_validate_float_with_inf(self):
with self.assertRaisesRegex(
ValueError, "expected a non-negative float"
):
validate_float_arg(float("inf"), "test")
with self.assertRaisesRegex(
ValueError, "expected a non-negative float"
):
validate_float_arg(-float("inf"), "test")
def test_validate_float_with_negative_number(self):
with self.assertRaisesRegex(
ValueError, "expected a non-negative float"
):
validate_float_arg(-1, "test")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/regularizers/regularizers.py | keras/src/regularizers/regularizers.py | import math
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.utils.numerical_utils import normalize
@keras_export(["keras.Regularizer", "keras.regularizers.Regularizer"])
class Regularizer:
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API
will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
and `Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's
output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers
and the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=L1(0.01),
... activity_regularizer=L2(0.01))
>>> tensor = ops.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size)
>>> # is 5
>>> ops.sum(layer.losses)
5.25
## Available penalties
```python
L1(0.3) # L1 Regularization Penalty
L2(0.1) # L2 Regularization Penalty
L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = L2(2.)
>>> tensor = ops.ones(shape=(5, 5))
>>> regularizer(tensor)
50.0
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> def l1_reg(weight_matrix):
... return 0.01 * ops.sum(ops.absolute(weight_matrix))
...
>>> layer = Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
0.25
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> class L2Regularizer(Regularizer):
... def __init__(self, l2=0.):
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * ops.sum(ops.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
12.5
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for saving and
loading models to HDF5 format, Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this
functionality, you must make sure any python process running your model has
also defined and registered your custom regularizer.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.0
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
A regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(f"{self} does not implement get_config()")
@keras_export(["keras.regularizers.L1L2", "keras.regularizers.l1_l2"])
class L1L2(Regularizer):
"""A regularizer that applies both L1 and L2 regularization penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as
`loss = l2 * reduce_sum(square(x))`
L1L2 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l1_l2')
In this case, the default values used are `l1=0.01` and `l2=0.01`.
Arguments:
l1: float, L1 regularization factor.
l2: float, L2 regularization factor.
"""
def __init__(self, l1=0.0, l2=0.0):
# The default values for l1 and l2 differ from those of the `l1_l2`
# string identifier, for backward compatibility. E.g., L1L2(l2=0.1) will
# only apply an l2 penalty and no l1 penalty.
l1 = 0.0 if l1 is None else l1
l2 = 0.0 if l2 is None else l2
validate_float_arg(l1, name="l1")
validate_float_arg(l2, name="l2")
self.l1 = l1
self.l2 = l2
def __call__(self, x):
regularization = ops.convert_to_tensor(0.0, dtype=x.dtype)
if self.l1:
regularization += self.l1 * ops.sum(ops.absolute(x))
if self.l2:
regularization += self.l2 * ops.sum(ops.square(x))
return regularization
def get_config(self):
return {"l1": float(self.l1), "l2": float(self.l2)}
@keras_export(["keras.regularizers.L1", "keras.regularizers.l1"])
class L1(Regularizer):
"""A regularizer that applies a L1 regularization penalty.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
L1 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l1')
In this case, the default value used is `l1=0.01`.
Arguments:
l1: float, L1 regularization factor.
"""
def __init__(self, l1=0.01):
l1 = 0.01 if l1 is None else l1
validate_float_arg(l1, name="l1")
self.l1 = ops.convert_to_tensor(l1)
def __call__(self, x):
return self.l1 * ops.sum(ops.absolute(x))
def get_config(self):
return {"l1": float(self.l1)}
@keras_export(["keras.regularizers.L2", "keras.regularizers.l2"])
class L2(Regularizer):
"""A regularizer that applies a L2 regularization penalty.
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
L2 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l2')
In this case, the default value used is `l2=0.01`.
Arguments:
l2: float, L2 regularization factor.
"""
def __init__(self, l2=0.01):
l2 = 0.01 if l2 is None else l2
validate_float_arg(l2, name="l2")
self.l2 = l2
def __call__(self, x):
return self.l2 * ops.sum(ops.square(x))
def get_config(self):
return {"l2": float(self.l2)}
@keras_export(
[
"keras.regularizers.OrthogonalRegularizer",
"keras.regularizers.orthogonal_regularizer",
]
)
class OrthogonalRegularizer(Regularizer):
"""Regularizer that encourages input vectors to be orthogonal to each other.
It can be applied to either the rows of a matrix (`mode="rows"`) or its
columns (`mode="columns"`). When applied to a `Dense` kernel of shape
`(input_dim, units)`, rows mode will seek to make the feature vectors
(i.e. the basis of the output space) orthogonal to each other.
Arguments:
factor: Float. The regularization factor. The regularization penalty
will be proportional to `factor` times the mean of the dot products
between the L2-normalized rows (if `mode="rows"`, or columns if
`mode="columns"`) of the inputs, excluding the product of each
row/column with itself. Defaults to `0.01`.
mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In
rows mode, the regularization effect seeks to make the rows of the
input orthogonal to each other. In columns mode, it seeks to make
the columns of the input orthogonal to each other.
Example:
>>> regularizer = OrthogonalRegularizer(factor=0.01)
>>> layer = Dense(units=4, kernel_regularizer=regularizer)
"""
def __init__(self, factor=0.01, mode="rows"):
validate_float_arg(factor, name="factor")
self.factor = ops.convert_to_tensor(factor)
if mode not in {"rows", "columns"}:
raise ValueError(
"Invalid value for argument `mode`. Expected one of "
f'{{"rows", "columns"}}. Received: mode={mode}'
)
self.mode = mode
def __call__(self, inputs):
if len(inputs.shape) != 2:
raise ValueError(
"Inputs to OrthogonalRegularizer must have rank 2. Received: "
f"inputs.shape={inputs.shape}"
)
if self.mode == "rows":
inputs = normalize(inputs, axis=1)
product = ops.matmul(inputs, ops.transpose(inputs))
size = inputs.shape[0]
else:
inputs = normalize(inputs, axis=0)
product = ops.matmul(ops.transpose(inputs), inputs)
size = inputs.shape[1]
product_no_diagonal = product * (
1.0 - ops.eye(size, dtype=inputs.dtype)
)
num_pairs = size * (size - 1.0) / 2.0
return (
self.factor
* 0.5
* ops.sum(ops.absolute(product_no_diagonal))
/ num_pairs
)
def get_config(self):
return {"factor": float(self.factor), "mode": self.mode}
def validate_float_arg(value, name):
"""check penalty number availability, raise ValueError if failed."""
if (
not isinstance(value, (float, int))
or (math.isinf(value) or math.isnan(value))
or value < 0
):
raise ValueError(
f"Invalid value for argument {name}: expected a non-negative float."
f"Received: {name}={value}"
)
return float(value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/regularizers/__init__.py | keras/src/regularizers/__init__.py | import inspect
from keras.src.api_export import keras_export
from keras.src.regularizers.regularizers import L1
from keras.src.regularizers.regularizers import L1L2
from keras.src.regularizers.regularizers import L2
from keras.src.regularizers.regularizers import OrthogonalRegularizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Regularizer,
L1,
L2,
L1L2,
OrthogonalRegularizer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_export("keras.regularizers.serialize")
def serialize(regularizer):
return serialization_lib.serialize_keras_object(regularizer)
@keras_export("keras.regularizers.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras regularizer object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.regularizers.get")
def get(identifier):
"""Retrieve a Keras regularizer object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret regularizer identifier: {identifier}"
)
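# Illustrative usage: `get` resolves snake_case identifiers, class names,
# config dicts, classes, and instances; `None` passes through. The
# identifiers below match those exercised in the regularizer tests.
if __name__ == "__main__":
    print(type(get("l1l2")))  # -> L1L2
    print(type(get("orthogonal_regularizer")))  # -> OrthogonalRegularizer
    print(get(None))  # -> None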
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/saving_api_test.py | keras/src/saving/saving_api_test.py | import os
import pathlib
import unittest.mock as mock
import numpy as np
from absl import logging
from absl.testing import parameterized
from keras.src import layers
from keras.src.legacy.saving.legacy_h5_format import save_model_to_hdf5
from keras.src.models import Sequential
from keras.src.saving import saving_api
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
class SaveModelTests(test_case.TestCase):
def get_model(self):
return Sequential(
[
layers.Dense(5, input_shape=(3,)),
layers.Softmax(),
]
)
def test_basic_saving(self):
"""Test basic model saving and loading."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
saving_api.save_model(model, filepath)
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
def test_invalid_save_format(self):
"""Test deprecated save_format argument."""
model = self.get_model()
with self.assertRaisesRegex(
ValueError, "The `save_format` argument is deprecated"
):
saving_api.save_model(model, "model.txt", save_format=True)
def test_unsupported_arguments(self):
"""Test unsupported argument during model save."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
with self.assertRaisesRegex(
ValueError, r"The following argument\(s\) are not supported"
):
saving_api.save_model(model, filepath, random_arg=True)
def test_save_h5_format(self):
"""Test saving model in h5 format."""
model = self.get_model()
filepath_h5 = os.path.join(self.get_temp_dir(), "test_model.h5")
# Verify the warning.
with mock.patch.object(logging, "warning") as mock_warn:
saving_api.save_model(model, filepath_h5)
mock_warn.assert_called_once_with(
"You are saving your model as an HDF5 file via "
"`model.save()` or `keras.saving.save_model(model)`. "
"This file format is considered legacy. "
"We recommend using instead the native Keras format, "
"e.g. `model.save('my_model.keras')` or "
"`keras.saving.save_model(model, 'my_model.keras')`. "
)
self.assertTrue(os.path.exists(filepath_h5))
os.remove(filepath_h5)
def test_save_unsupported_extension(self):
"""Test saving model with unsupported extension."""
model = self.get_model()
with self.assertRaisesRegex(
ValueError, "Invalid filepath extension for saving"
):
saving_api.save_model(model, "model.png")
def test_objects_to_skip(self):
model = Sequential(
[
layers.Input((3,)),
layers.Dense(5),
layers.Dense(5),
]
)
skip = model.layers[0]
filepath = os.path.join(self.get_temp_dir(), "test_model.weights.h5")
saving_api.save_weights(model, filepath, objects_to_skip=[skip])
new_model = Sequential(
[
layers.Input((3,)),
layers.Dense(5),
layers.Dense(5),
]
)
new_model.load_weights(filepath, objects_to_skip=[new_model.layers[0]])
self.assertNotAllClose(
new_model.layers[0].get_weights()[0],
model.layers[0].get_weights()[0],
)
self.assertAllClose(
new_model.layers[0].get_weights()[1],
model.layers[0].get_weights()[1],
)
saving_api.save_weights(model, filepath)
new_model.load_weights(filepath, objects_to_skip=[new_model.layers[0]])
self.assertNotAllClose(
new_model.layers[0].get_weights()[0],
model.layers[0].get_weights()[0],
)
self.assertAllClose(
new_model.layers[0].get_weights()[1],
model.layers[0].get_weights()[1],
)
class LoadModelTests(test_case.TestCase):
def get_model(self, dtype=None):
return Sequential(
[
layers.Dense(5, input_shape=(3,), dtype=dtype),
layers.Softmax(),
]
)
@parameterized.named_parameters(
[
{"testcase_name": "bfloat16", "dtype": "bfloat16"},
{"testcase_name": "float16", "dtype": "float16"},
{"testcase_name": "float32", "dtype": "float32"},
{"testcase_name": "float64", "dtype": "float64"},
]
)
def test_basic_load(self, dtype):
"""Test basic model loading."""
model = self.get_model(dtype)
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
saving_api.save_model(model, filepath)
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertEqual(loaded_model.weights[0].dtype, dtype)
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
def test_load_unsupported_format(self):
"""Test loading model with unsupported format."""
with self.assertRaisesRegex(ValueError, "File format not supported"):
saving_api.load_model("model.pkl")
def test_load_keras_not_zip(self):
"""Test loading keras file that's not a zip."""
with self.assertRaisesRegex(ValueError, "File not found"):
saving_api.load_model("not_a_zip.keras")
def test_load_h5_format(self):
"""Test loading model in h5 format."""
model = self.get_model()
filepath_h5 = os.path.join(self.get_temp_dir(), "test_model.h5")
saving_api.save_model(model, filepath_h5)
loaded_model = saving_api.load_model(filepath_h5)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
os.remove(filepath_h5)
def test_load_model_with_custom_objects(self):
"""Test loading model with custom objects."""
class CustomLayer(layers.Layer):
def call(self, inputs):
return inputs
model = Sequential([CustomLayer(input_shape=(3,))])
filepath = os.path.join(self.get_temp_dir(), "custom_model.keras")
model.save(filepath)
loaded_model = saving_api.load_model(
filepath, custom_objects={"CustomLayer": CustomLayer}
)
self.assertIsInstance(loaded_model.layers[0], CustomLayer)
os.remove(filepath)
def test_save_unzipped(self):
"""Test saving/loading an unzipped model dir."""
model = self.get_model()
# Test error with keras extension
bad_filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
with self.assertRaisesRegex(ValueError, "should not end in"):
saving_api.save_model(model, bad_filepath, zipped=False)
filepath = os.path.join(self.get_temp_dir(), "test_model_dir")
saving_api.save_model(model, filepath, zipped=False)
self.assertTrue(os.path.exists(filepath))
self.assertTrue(os.path.isdir(filepath))
config_filepath = os.path.join(filepath, "config.json")
weights_filepath = os.path.join(filepath, "model.weights.h5")
self.assertTrue(os.path.exists(config_filepath))
self.assertTrue(os.path.exists(weights_filepath))
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
class LoadWeightsTests(test_case.TestCase):
def get_model(self, dtype=None):
return Sequential(
[
layers.Dense(5, input_shape=(3,), dtype=dtype),
layers.Softmax(),
]
)
@parameterized.named_parameters(
named_product(
save_format=["keras", "weights.h5", "h5"],
source_dtype=["float64", "float32", "float16", "bfloat16"],
dest_dtype=["float64", "float32", "float16", "bfloat16"],
)
)
def test_load_weights(self, save_format, source_dtype, dest_dtype):
"""Test loading keras weights."""
src_model = self.get_model(dtype=source_dtype)
if save_format == "keras":
filepath = os.path.join(self.get_temp_dir(), "test_weights.keras")
src_model.save(filepath)
elif save_format == "weights.h5":
filepath = os.path.join(
self.get_temp_dir(), "test_weights.weights.h5"
)
src_model.save_weights(filepath)
elif save_format == "h5":
if "bfloat16" in (source_dtype, dest_dtype):
raise self.skipTest(
"bfloat16 dtype is not supported in legacy h5 format."
)
filepath = os.path.join(self.get_temp_dir(), "test_weights.h5")
save_model_to_hdf5(src_model, filepath)
else:
raise ValueError(f"Unsupported save format: {save_format}")
dest_model = self.get_model(dtype=dest_dtype)
dest_model.load_weights(filepath)
src_weights = src_model.get_weights()
dest_weights = dest_model.get_weights()
for orig, loaded in zip(src_weights, dest_weights):
self.assertAllClose(
orig.astype("float32"),
loaded.astype("float32"),
atol=0.001,
rtol=0.01,
)
def test_load_weights_invalid_kwargs(self):
src_model = self.get_model()
keras_filepath = os.path.join(self.get_temp_dir(), "test_weights.keras")
weight_h5_filepath = os.path.join(
self.get_temp_dir(), "test_weights.weights.h5"
)
legacy_h5_filepath = os.path.join(
self.get_temp_dir(), "test_weights.h5"
)
src_model.save(keras_filepath)
src_model.save_weights(weight_h5_filepath)
save_model_to_hdf5(src_model, legacy_h5_filepath)
dest_model = self.get_model()
# Test keras file.
with self.assertRaisesRegex(
ValueError, r"only supports loading '.weights.h5' files."
):
dest_model.load_weights(keras_filepath, objects_to_skip=[])
with self.assertRaisesRegex(
ValueError, r"only supports loading legacy '.h5' or '.hdf5' files."
):
dest_model.load_weights(keras_filepath, by_name=True)
with self.assertRaisesRegex(ValueError, r"Invalid keyword arguments"):
dest_model.load_weights(keras_filepath, bad_kwarg=None)
# Test weights.h5 file.
with self.assertRaisesRegex(
ValueError, r"only supports loading legacy '.h5' or '.hdf5' files."
):
dest_model.load_weights(weight_h5_filepath, by_name=True)
# Test h5 file.
with self.assertRaisesRegex(
ValueError, r"only supports loading '.weights.h5' files."
):
dest_model.load_weights(legacy_h5_filepath, objects_to_skip=[])
def test_load_weights_invalid_extension(self):
"""Test loading weights with unsupported extension."""
model = self.get_model()
with self.assertRaisesRegex(ValueError, "File format not supported"):
model.load_weights("invalid_extension.pkl")
def test_load_sharded_weights(self):
src_model = self.get_model()
temp_filepath = pathlib.Path(
os.path.join(self.get_temp_dir(), "test_weights.weights.json")
)
src_model.save_weights(temp_filepath, max_shard_size=1)
self.assertLen(os.listdir(temp_filepath.parent), 2)
src_weights = src_model.get_weights()
dest_model = self.get_model()
dest_model.load_weights(temp_filepath)
dest_weights = dest_model.get_weights()
for orig, loaded in zip(src_weights, dest_weights):
self.assertAllClose(orig, loaded)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/serialization_lib.py | keras/src/saving/serialization_lib.py | """Object config serialization and deserialization logic."""
import importlib
import inspect
import types
import warnings
import numpy as np
from keras.src import api_export
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.saving import object_registration
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import python_utils
from keras.src.utils.module_utils import tensorflow as tf
PLAIN_TYPES = (str, int, float, bool)
# List of Keras modules with built-in string representations for Keras defaults
BUILTIN_MODULES = frozenset(
{
"activations",
"constraints",
"initializers",
"losses",
"metrics",
"optimizers",
"regularizers",
}
)
LOADING_APIS = frozenset(
{
"keras.config.enable_unsafe_deserialization",
"keras.models.load_model",
"keras.preprocessing.image.load_img",
"keras.saving.load_model",
"keras.saving.load_weights",
"keras.utils.get_file",
"keras.utils.load_img",
}
)
class SerializableDict:
def __init__(self, **config):
self.config = config
def serialize(self):
return serialize_keras_object(self.config)
class SafeModeScope:
"""Scope to propagate safe mode flag to nested deserialization calls."""
def __init__(self, safe_mode=True):
self.safe_mode = safe_mode
def __enter__(self):
self.original_value = in_safe_mode()
global_state.set_global_attribute("safe_mode_saving", self.safe_mode)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"safe_mode_saving", self.original_value
)
@keras_export("keras.config.enable_unsafe_deserialization")
def enable_unsafe_deserialization():
"""Disables safe mode globally, allowing deserialization of lambdas."""
global_state.set_global_attribute("safe_mode_saving", False)
def in_safe_mode():
return global_state.get_global_attribute("safe_mode_saving")
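# Illustrative sketch: the safe-mode flag gates deserialization of
# arbitrary code such as lambdas; `SafeModeScope` restores the previous
# value on exit.
if __name__ == "__main__":
    with SafeModeScope(safe_mode=True):
        print(in_safe_mode())  # True
    enable_unsafe_deserialization()
    print(in_safe_mode())  # False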
class ObjectSharingScope:
"""Scope to enable detection and reuse of previously seen objects."""
def __enter__(self):
global_state.set_global_attribute("shared_objects/id_to_obj_map", {})
global_state.set_global_attribute("shared_objects/id_to_config_map", {})
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute("shared_objects/id_to_obj_map", None)
global_state.set_global_attribute(
"shared_objects/id_to_config_map", None
)
def get_shared_object(obj_id):
"""Retrieve an object previously seen during deserialization."""
id_to_obj_map = global_state.get_global_attribute(
"shared_objects/id_to_obj_map"
)
if id_to_obj_map is not None:
return id_to_obj_map.get(obj_id, None)
def record_object_after_serialization(obj, config):
"""Call after serializing an object, to keep track of its config."""
if config["module"] == "__main__":
config["module"] = None # Ensures module is None when no module found
id_to_config_map = global_state.get_global_attribute(
"shared_objects/id_to_config_map"
)
if id_to_config_map is None:
return # Not in a sharing scope
obj_id = int(id(obj))
if obj_id not in id_to_config_map:
id_to_config_map[obj_id] = config
else:
config["shared_object_id"] = obj_id
prev_config = id_to_config_map[obj_id]
prev_config["shared_object_id"] = obj_id
def record_object_after_deserialization(obj, obj_id):
"""Call after deserializing an object, to keep track of it in the future."""
id_to_obj_map = global_state.get_global_attribute(
"shared_objects/id_to_obj_map"
)
if id_to_obj_map is None:
return # Not in a sharing scope
id_to_obj_map[obj_id] = obj
@keras_export(
[
"keras.saving.serialize_keras_object",
"keras.utils.serialize_keras_object",
]
)
def serialize_keras_object(obj):
"""Retrieve the config dict by serializing the Keras object.
`serialize_keras_object()` serializes a Keras object to a python dictionary
that represents the object, and is a reciprocal function of
`deserialize_keras_object()`. See `deserialize_keras_object()` for more
information about the config format.
Args:
obj: the Keras object to serialize.
Returns:
A python dict that represents the object. The python dict can be
deserialized via `deserialize_keras_object()`.
"""
if obj is None:
return obj
if isinstance(obj, PLAIN_TYPES):
return obj
if isinstance(obj, (list, tuple)):
config_arr = [serialize_keras_object(x) for x in obj]
return tuple(config_arr) if isinstance(obj, tuple) else config_arr
if isinstance(obj, dict):
return serialize_dict(obj)
# Special cases:
if isinstance(obj, bytes):
return {
"class_name": "__bytes__",
"config": {"value": obj.decode("utf-8")},
}
if isinstance(obj, slice):
return {
"class_name": "__slice__",
"config": {
"start": serialize_keras_object(obj.start),
"stop": serialize_keras_object(obj.stop),
"step": serialize_keras_object(obj.step),
},
}
# Ellipsis is an instance, and the ellipsis class is not in global scope.
# Checking equality also fails elsewhere in the library, so we have to
# dynamically get the type.
if isinstance(obj, type(Ellipsis)):
return {"class_name": "__ellipsis__", "config": {}}
if isinstance(obj, backend.KerasTensor):
history = getattr(obj, "_keras_history", None)
if history:
history = list(history)
history[0] = history[0].name
return {
"class_name": "__keras_tensor__",
"config": {
"shape": obj.shape,
"dtype": obj.dtype,
"keras_history": history,
},
}
if tf.available and isinstance(obj, tf.TensorShape):
return obj.as_list() if obj._dims is not None else None
if backend.is_tensor(obj):
return {
"class_name": "__tensor__",
"config": {
"value": backend.convert_to_numpy(obj).tolist(),
"dtype": backend.standardize_dtype(obj.dtype),
},
}
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray) and obj.ndim > 0:
return {
"class_name": "__numpy__",
"config": {
"value": obj.tolist(),
"dtype": backend.standardize_dtype(obj.dtype),
},
}
else:
# Treat numpy floats / etc as plain types.
return obj.item()
if tf.available and isinstance(obj, tf.DType):
return obj.name
if isinstance(obj, types.FunctionType) and obj.__name__ == "<lambda>":
warnings.warn(
"The object being serialized includes a `lambda`. This is unsafe. "
"In order to reload the object, you will have to pass "
"`safe_mode=False` to the loading function. "
"Please avoid using `lambda` in the "
"future, and use named Python functions instead. "
f"This is the `lambda` being serialized: {inspect.getsource(obj)}",
stacklevel=2,
)
return {
"class_name": "__lambda__",
"config": {
"value": python_utils.func_dump(obj),
},
}
if tf.available and isinstance(obj, tf.TypeSpec):
ts_config = obj._serialize()
# TensorShape and tf.DType conversion
ts_config = list(
map(
lambda x: (
x.as_list()
if isinstance(x, tf.TensorShape)
else (x.name if isinstance(x, tf.DType) else x)
),
ts_config,
)
)
return {
"class_name": "__typespec__",
"spec_name": obj.__class__.__name__,
"module": obj.__class__.__module__,
"config": ts_config,
"registered_name": None,
}
inner_config = _get_class_or_fn_config(obj)
config_with_public_class = serialize_with_public_class(
obj.__class__, inner_config
)
if config_with_public_class is not None:
get_build_and_compile_config(obj, config_with_public_class)
record_object_after_serialization(obj, config_with_public_class)
return config_with_public_class
# Any custom object or otherwise non-exported object
if isinstance(obj, types.FunctionType):
module = obj.__module__
else:
module = obj.__class__.__module__
class_name = obj.__class__.__name__
if module == "builtins":
registered_name = None
else:
if isinstance(obj, types.FunctionType):
registered_name = object_registration.get_registered_name(obj)
else:
registered_name = object_registration.get_registered_name(
obj.__class__
)
config = {
"module": module,
"class_name": class_name,
"config": inner_config,
"registered_name": registered_name,
}
get_build_and_compile_config(obj, config)
record_object_after_serialization(obj, config)
return config
def get_build_and_compile_config(obj, config):
if hasattr(obj, "get_build_config"):
build_config = obj.get_build_config()
if build_config is not None:
config["build_config"] = serialize_dict(build_config)
if hasattr(obj, "get_compile_config"):
compile_config = obj.get_compile_config()
if compile_config is not None:
config["compile_config"] = serialize_dict(compile_config)
return
def serialize_with_public_class(cls, inner_config=None):
"""Serializes classes from public Keras API or object registration.
Called to check and retrieve the config of any class that has a public
Keras API or has been registered as serializable via
`keras.saving.register_keras_serializable()`.
"""
# This gets the `keras.*` exported name, such as
# "keras.optimizers.Adam".
keras_api_name = api_export.get_name_from_symbol(cls)
# Case of custom or unknown class object
if keras_api_name is None:
registered_name = object_registration.get_registered_name(cls)
if registered_name is None:
return None
# Return custom object config with corresponding registration name
return {
"module": cls.__module__,
"class_name": cls.__name__,
"config": inner_config,
"registered_name": registered_name,
}
# Split the canonical Keras API name into a Keras module and class name.
parts = keras_api_name.split(".")
return {
"module": ".".join(parts[:-1]),
"class_name": parts[-1],
"config": inner_config,
"registered_name": None,
}
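# e.g. for `keras.optimizers.Adam` this returns:
#   {"module": "keras.optimizers", "class_name": "Adam",
#    "config": inner_config, "registered_name": None}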
def serialize_with_public_fn(fn, config, fn_module_name=None):
"""Serializes functions from public Keras API or object registration.
Called to check and retrieve the config of any function that has a public
Keras API or has been registered as serializable via
    `keras.saving.register_keras_serializable()`. If the function's module
    name is already known, returns the corresponding config.
"""
if fn_module_name:
return {
"module": fn_module_name,
"class_name": "function",
"config": config,
"registered_name": config,
}
keras_api_name = api_export.get_name_from_symbol(fn)
if keras_api_name:
parts = keras_api_name.split(".")
return {
"module": ".".join(parts[:-1]),
"class_name": "function",
"config": config,
"registered_name": config,
}
else:
registered_name = object_registration.get_registered_name(fn)
        if not registered_name and fn.__module__ != "builtins":
return None
return {
"module": fn.__module__,
"class_name": "function",
"config": config,
"registered_name": registered_name,
}
def _get_class_or_fn_config(obj):
"""Return the object's config depending on its type."""
# Functions / lambdas:
if isinstance(obj, types.FunctionType):
return object_registration.get_registered_name(obj)
# All classes:
if hasattr(obj, "get_config"):
config = obj.get_config()
if not isinstance(config, dict):
raise TypeError(
f"The `get_config()` method of {obj} should return "
f"a dict. It returned: {config}"
)
return serialize_dict(config)
elif hasattr(obj, "__name__"):
return object_registration.get_registered_name(obj)
else:
raise TypeError(
f"Cannot serialize object {obj} of type {type(obj)}. "
"To be serializable, "
"a class must implement the `get_config()` method."
)
def serialize_dict(obj):
return {key: serialize_keras_object(value) for key, value in obj.items()}
@keras_export(
[
"keras.saving.deserialize_keras_object",
"keras.utils.deserialize_keras_object",
]
)
def deserialize_keras_object(
config, custom_objects=None, safe_mode=True, **kwargs
):
"""Retrieve the object by deserializing the config dict.
The config dict is a Python dictionary that consists of a set of key-value
pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,
`Metrics`, etc. The saving and loading library uses the following keys to
record information of a Keras object:
- `class_name`: String. This is the name of the class,
as exactly defined in the source
code, such as "LossesContainer".
- `config`: Dict. Library-defined or user-defined key-value pairs that store
the configuration of the object, as obtained by `object.get_config()`.
- `module`: String. The path of the python module. Built-in Keras classes
expect to have prefix `keras`.
- `registered_name`: String. The key the class is registered under via
`keras.saving.register_keras_serializable(package, name)` API. The
key has the format of '{package}>{name}', where `package` and `name` are
the arguments passed to `register_keras_serializable()`. If `name` is not
provided, it uses the class name. If `registered_name` successfully
resolves to a class (that was registered), the `class_name` and `config`
values in the dict will not be used. `registered_name` is only used for
non-built-in classes.
For example, the following dictionary represents the built-in Adam optimizer
with the relevant config:
```python
dict_structure = {
"class_name": "Adam",
"config": {
"amsgrad": false,
"beta_1": 0.8999999761581421,
"beta_2": 0.9990000128746033,
"decay": 0.0,
"epsilon": 1e-07,
"learning_rate": 0.0010000000474974513,
"name": "Adam"
},
"module": "keras.optimizers",
"registered_name": None
}
# Returns an `Adam` instance identical to the original one.
deserialize_keras_object(dict_structure)
```
If the class does not have an exported Keras namespace, the library tracks
it by its `module` and `class_name`. For example:
```python
dict_structure = {
"class_name": "MetricsList",
"config": {
...
},
"module": "keras.trainers.compile_utils",
"registered_name": "MetricsList"
}
# Returns a `MetricsList` instance identical to the original one.
deserialize_keras_object(dict_structure)
```
And the following dictionary represents a user-customized `MeanSquaredError`
loss:
```python
@keras.saving.register_keras_serializable(package='my_package')
class ModifiedMeanSquaredError(keras.losses.MeanSquaredError):
...
dict_structure = {
"class_name": "ModifiedMeanSquaredError",
"config": {
"fn": "mean_squared_error",
"name": "mean_squared_error",
"reduction": "auto"
},
"registered_name": "my_package>ModifiedMeanSquaredError"
}
# Returns the `ModifiedMeanSquaredError` object
deserialize_keras_object(dict_structure)
```
Args:
config: Python dict describing the object.
custom_objects: Python dict containing a mapping between custom
            object names and the corresponding classes or functions.
safe_mode: Boolean, whether to disallow unsafe `lambda` deserialization.
When `safe_mode=False`, loading an object has the potential to
trigger arbitrary code execution. This argument is only
applicable to the Keras v3 model format. Defaults to `True`.
Returns:
The object described by the `config` dictionary.
"""
safe_scope_arg = in_safe_mode() # Enforces SafeModeScope
safe_mode = safe_scope_arg if safe_scope_arg is not None else safe_mode
module_objects = kwargs.pop("module_objects", None)
custom_objects = custom_objects or {}
tlco = global_state.get_global_attribute("custom_objects_scope_dict", {})
gco = object_registration.GLOBAL_CUSTOM_OBJECTS
custom_objects = {**custom_objects, **tlco, **gco}
if config is None:
return None
if (
isinstance(config, str)
and custom_objects
and custom_objects.get(config) is not None
):
# This is to deserialize plain functions which are serialized as
# string names by legacy saving formats.
return custom_objects[config]
if isinstance(config, (list, tuple)):
return [
deserialize_keras_object(
x, custom_objects=custom_objects, safe_mode=safe_mode
)
for x in config
]
if module_objects is not None:
inner_config, fn_module_name, has_custom_object = None, None, False
if isinstance(config, dict):
if "config" in config:
inner_config = config["config"]
if "class_name" not in config:
raise ValueError(
f"Unknown `config` as a `dict`, config={config}"
)
# Check case where config is function or class and in custom objects
if custom_objects and (
config["class_name"] in custom_objects
or config.get("registered_name") in custom_objects
or (
isinstance(inner_config, str)
and inner_config in custom_objects
)
):
has_custom_object = True
# Case where config is function but not in custom objects
elif config["class_name"] == "function":
fn_module_name = config["module"]
if fn_module_name == "builtins":
config = config["config"]
else:
config = config["registered_name"]
# Case where config is class but not in custom objects
else:
if config.get("module", "_") is None:
raise TypeError(
"Cannot deserialize object of type "
f"`{config['class_name']}`. If "
f"`{config['class_name']}` is a custom class, please "
"register it using the "
"`@keras.saving.register_keras_serializable()` "
"decorator."
)
config = config["class_name"]
if not has_custom_object:
# Return if not found in either module objects or custom objects
if config not in module_objects:
# Object has already been deserialized
return config
if isinstance(module_objects[config], types.FunctionType):
return deserialize_keras_object(
serialize_with_public_fn(
module_objects[config], config, fn_module_name
),
custom_objects=custom_objects,
)
return deserialize_keras_object(
serialize_with_public_class(
module_objects[config], inner_config=inner_config
),
custom_objects=custom_objects,
)
if isinstance(config, PLAIN_TYPES):
return config
if not isinstance(config, dict):
raise TypeError(f"Could not parse config: {config}")
if "class_name" not in config or "config" not in config:
return {
key: deserialize_keras_object(
value, custom_objects=custom_objects, safe_mode=safe_mode
)
for key, value in config.items()
}
class_name = config["class_name"]
inner_config = config["config"] or {}
custom_objects = custom_objects or {}
# Special cases:
if class_name == "__keras_tensor__":
obj = backend.KerasTensor(
inner_config["shape"], dtype=inner_config["dtype"]
)
obj._pre_serialization_keras_history = inner_config["keras_history"]
return obj
if class_name == "__tensor__":
return backend.convert_to_tensor(
inner_config["value"], dtype=inner_config["dtype"]
)
if class_name == "__numpy__":
return np.array(inner_config["value"], dtype=inner_config["dtype"])
if config["class_name"] == "__bytes__":
return inner_config["value"].encode("utf-8")
if config["class_name"] == "__ellipsis__":
return Ellipsis
if config["class_name"] == "__slice__":
return slice(
deserialize_keras_object(
inner_config["start"],
custom_objects=custom_objects,
safe_mode=safe_mode,
),
deserialize_keras_object(
inner_config["stop"],
custom_objects=custom_objects,
safe_mode=safe_mode,
),
deserialize_keras_object(
inner_config["step"],
custom_objects=custom_objects,
safe_mode=safe_mode,
),
)
if config["class_name"] == "__lambda__":
if safe_mode:
            raise ValueError(
                "Requested the deserialization of a Python lambda. This "
                "carries a potential risk of arbitrary code execution and "
                "thus it is disallowed by default. If you trust the source "
                "of the artifact, you can override this error by passing "
                "`safe_mode=False` to the loading function, or calling "
                "`keras.config.enable_unsafe_deserialization()`."
            )
return python_utils.func_load(inner_config["value"])
    if tf.available and config["class_name"] == "__typespec__":
obj = _retrieve_class_or_fn(
config["spec_name"],
config["registered_name"],
config["module"],
obj_type="class",
full_config=config,
custom_objects=custom_objects,
)
# Conversion to TensorShape and DType
inner_config = map(
lambda x: (
tf.TensorShape(x)
if isinstance(x, list)
else (getattr(tf, x) if hasattr(tf.dtypes, str(x)) else x)
),
inner_config,
)
return obj._deserialize(tuple(inner_config))
# Below: classes and functions.
module = config.get("module", None)
registered_name = config.get("registered_name", class_name)
if class_name == "function":
fn_name = inner_config
return _retrieve_class_or_fn(
fn_name,
registered_name,
module,
obj_type="function",
full_config=config,
custom_objects=custom_objects,
)
# Below, handling of all classes.
# First, is it a shared object?
if "shared_object_id" in config:
obj = get_shared_object(config["shared_object_id"])
if obj is not None:
return obj
cls = _retrieve_class_or_fn(
class_name,
registered_name,
module,
obj_type="class",
full_config=config,
custom_objects=custom_objects,
)
if isinstance(cls, types.FunctionType):
return cls
if not hasattr(cls, "from_config"):
raise TypeError(
f"Unable to reconstruct an instance of '{class_name}' because "
f"the class is missing a `from_config()` method. "
f"Full object config: {config}"
)
# Instantiate the class from its config inside a custom object scope
# so that we can catch any custom objects that the config refers to.
custom_obj_scope = object_registration.CustomObjectScope(custom_objects)
safe_mode_scope = SafeModeScope(safe_mode)
with custom_obj_scope, safe_mode_scope:
try:
instance = cls.from_config(inner_config)
except TypeError as e:
raise TypeError(
f"{cls} could not be deserialized properly. Please"
" ensure that components that are Python object"
" instances (layers, models, etc.) returned by"
" `get_config()` are explicitly deserialized in the"
" model's `from_config()` method."
f"\n\nconfig={config}.\n\nException encountered: {e}"
)
build_config = config.get("build_config", None)
if build_config and not instance.built:
instance.build_from_config(build_config)
instance.built = True
compile_config = config.get("compile_config", None)
if compile_config:
instance.compile_from_config(compile_config)
instance.compiled = True
if "shared_object_id" in config:
record_object_after_deserialization(
instance, config["shared_object_id"]
)
return instance
def _retrieve_class_or_fn(
name, registered_name, module, obj_type, full_config, custom_objects=None
):
# If there is a custom object registered via
# `register_keras_serializable()`, that takes precedence.
if obj_type == "function":
custom_obj = object_registration.get_registered_object(
name, custom_objects=custom_objects
)
else:
custom_obj = object_registration.get_registered_object(
registered_name, custom_objects=custom_objects
)
if custom_obj is not None:
return custom_obj
if module:
# If it's a Keras built-in object,
# we cannot always use direct import, because the exported
# module name might not match the package structure
# (e.g. experimental symbols).
if module == "keras" or module.startswith("keras."):
api_name = f"{module}.{name}"
if api_name in LOADING_APIS:
raise ValueError(
f"Cannot deserialize `{api_name}`, loading functions are "
"not allowed during deserialization"
)
obj = api_export.get_symbol_from_name(api_name)
if obj is not None:
return obj
# Configs of Keras built-in functions do not contain identifying
# information other than their name (e.g. 'acc' or 'tanh'). This special
# case searches the Keras modules that contain built-ins to retrieve
# the corresponding function from the identifying string.
if obj_type == "function" and module == "builtins":
for mod in BUILTIN_MODULES:
obj = api_export.get_symbol_from_name(f"keras.{mod}.{name}")
if obj is not None:
return obj
# Workaround for serialization bug in Keras <= 3.6 whereby custom
# functions would only be saved by name instead of registered name,
# i.e. "name" instead of "package>name". This allows recent versions
# of Keras to reload models saved with 3.6 and lower.
if ">" not in name:
separated_name = f">{name}"
for custom_name, custom_object in custom_objects.items():
if custom_name.endswith(separated_name):
return custom_object
# Otherwise, attempt to retrieve the class object given the `module`
# and `class_name`. Import the module, find the class.
package = module.split(".", maxsplit=1)[0]
if package in {"keras", "keras_hub", "keras_cv", "keras_nlp"}:
try:
mod = importlib.import_module(module)
obj = vars(mod).get(name, None)
if isinstance(obj, type) and issubclass(obj, KerasSaveable):
return obj
else:
raise ValueError(
f"Could not deserialize '{module}.{name}' because "
"it is not a KerasSaveable subclass"
)
except ModuleNotFoundError:
raise TypeError(
f"Could not deserialize {obj_type} '{name}' because "
f"its parent module {module} cannot be imported. "
f"Full object config: {full_config}"
)
raise TypeError(
f"Could not locate {obj_type} '{name}'. Make sure custom classes and "
"functions are decorated with "
"`@keras.saving.register_keras_serializable()`. If they are already "
"decorated, make sure they are all imported so that the decorator is "
f"run before trying to load them. Full object config: {full_config}"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/file_editor.py | keras/src/saving/file_editor.py | import collections
import json
import os.path
import pprint
import zipfile
import h5py
import numpy as np
import rich.console
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.saving import saving_lib
from keras.src.saving.saving_lib import H5IOStore
from keras.src.utils import naming
from keras.src.utils import summary_utils
try:
import IPython as ipython
except ImportError:
ipython = None
def is_ipython_notebook():
    """Checks whether an IPython shell (such as a notebook) is active."""
try:
from IPython import get_ipython
# Check if an active IPython shell exists.
if get_ipython() is not None:
return True
return False
except ImportError:
return False
@keras_export("keras.saving.KerasFileEditor")
class KerasFileEditor:
"""Utility to inspect, edit, and resave Keras weights files.
You will find this class useful when adapting
an old saved weights file after having made
architecture changes to a model.
Args:
filepath: The path to a local file to inspect and edit.
Examples:
```python
editor = KerasFileEditor("my_model.weights.h5")
# Displays current contents
editor.summary()
# Remove the weights of an existing layer
editor.delete_object("layers/dense_2")
# Add the weights of a new layer
editor.add_object("layers/einsum_dense", weights={"0": ..., "1": ...})
# Save the weights of the edited model
editor.resave_weights("edited_model.weights.h5")
```
"""
def __init__(
self,
filepath,
):
self.filepath = filepath
self.metadata = None
self.config = None
self.model = None
self.console = rich.console.Console(highlight=False)
if filepath.endswith(".keras"):
zf = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(
f"{saving_lib._VARS_FNAME}.h5",
archive=zf,
mode="r",
)
with zf.open(saving_lib._CONFIG_FILENAME, "r") as f:
config_json = f.read()
with zf.open(saving_lib._METADATA_FILENAME, "r") as f:
metadata_json = f.read()
self.config = json.loads(config_json)
self.metadata = json.loads(metadata_json)
elif filepath.endswith(".weights.h5"):
weights_store = H5IOStore(filepath, mode="r")
else:
            raise ValueError(
                "Invalid filename: "
                "expected a `.keras` or `.weights.h5` extension. "
                f"Received: filepath={filepath}"
)
weights_dict, object_metadata = self._extract_weights_from_store(
weights_store.h5_file
)
weights_store.close()
self.weights_dict = weights_dict
        self.object_metadata = object_metadata  # {inner_path: attrs dict}
self.console.print(self._generate_filepath_info(rich_style=True))
if self.metadata is not None:
self.console.print(self._generate_metadata_info(rich_style=True))
def summary(self):
"""Prints the weight structure of the opened file."""
self._weights_summary_cli()
def compare(self, reference_model):
"""Compares the opened file to a reference model.
This method will list all mismatches between the
currently opened file and the provided reference model.
Args:
reference_model: Model instance to compare to.
Returns:
Dict with the following keys:
`'status'`, `'error_count'`, `'match_count'`.
Status can be `'success'` or `'error'`.
`'error_count'` is the number of mismatches found.
`'match_count'` is the number of matching weights found.
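        Example:

        ```python
        # Hypothetical: `reference_model` is a model you rebuilt in code.
        report = editor.compare(reference_model)
        if report["status"] == "error":
            print(f"Found {report['error_count']} mismatches")
        ```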
"""
self.console.print("Running comparison")
ref_spec = {}
get_weight_spec_of_saveable(reference_model, ref_spec)
def _compare(
target,
ref_spec,
inner_path,
target_name,
ref_name,
error_count,
match_count,
checked_paths,
):
base_inner_path = inner_path
for ref_key, ref_val in ref_spec.items():
inner_path = f"{base_inner_path}/{ref_key}"
if inner_path in checked_paths:
continue
if ref_key not in target:
error_count += 1
checked_paths.add(inner_path)
if isinstance(ref_val, dict):
self.console.print(
f"[color(160)]...Object [bold]{inner_path}[/] "
f"present in {ref_name}, "
f"missing from {target_name}[/]"
)
self.console.print(
f" In {ref_name}, {inner_path} contains "
f"the following keys: {list(ref_val.keys())}"
)
else:
self.console.print(
f"[color(160)]...Weight [bold]{inner_path}[/] "
f"present in {ref_name}, "
f"missing from {target_name}[/]"
)
elif isinstance(ref_val, dict):
_error_count, _match_count = _compare(
target[ref_key],
ref_spec[ref_key],
inner_path,
target_name,
ref_name,
                        error_count=0,
                        match_count=0,
checked_paths=checked_paths,
)
error_count += _error_count
match_count += _match_count
else:
if target[ref_key].shape != ref_val.shape:
error_count += 1
checked_paths.add(inner_path)
self.console.print(
f"[color(160)]...Weight shape mismatch "
f"for [bold]{inner_path}[/][/]\n"
f" In {ref_name}: "
f"shape={ref_val.shape}\n"
f" In {target_name}: "
f"shape={target[ref_key].shape}"
)
else:
match_count += 1
return error_count, match_count
checked_paths = set()
error_count, match_count = _compare(
self.weights_dict,
ref_spec,
inner_path="",
target_name="saved file",
ref_name="reference model",
error_count=0,
match_count=0,
checked_paths=checked_paths,
)
_error_count, _ = _compare(
ref_spec,
self.weights_dict,
inner_path="",
target_name="reference model",
ref_name="saved file",
error_count=0,
match_count=0,
checked_paths=checked_paths,
)
error_count += _error_count
self.console.print("─────────────────────")
if error_count == 0:
status = "success"
self.console.print(
"[color(28)][bold]Comparison successful:[/] "
"saved file is compatible with the reference model[/]"
)
if match_count == 1:
plural = ""
else:
plural = "s"
self.console.print(
f" Found {match_count} matching weight{plural}"
)
else:
status = "error"
if error_count == 1:
plural = ""
else:
plural = "s"
self.console.print(
f"[color(160)][bold]Found {error_count} error{plural}:[/] "
"saved file is not compatible with the reference model[/]"
)
return {
"status": status,
"error_count": error_count,
"match_count": match_count,
}
def _edit_object(self, edit_fn, source_name, target_name=None):
if target_name is not None and "/" in target_name:
raise ValueError(
"Argument `target_name` should be a leaf name, "
"not a full path name. "
f"Received: target_name='{target_name}'"
)
if "/" in source_name:
# It's a path
elements = source_name.split("/")
weights_dict = self.weights_dict
for e in elements[:-1]:
if e not in weights_dict:
raise ValueError(
f"Path '{source_name}' not found in model."
)
weights_dict = weights_dict[e]
if elements[-1] not in weights_dict:
raise ValueError(f"Path '{source_name}' not found in model.")
edit_fn(
weights_dict, source_name=elements[-1], target_name=target_name
)
else:
            # Ensure the name is unique across the whole tree.
            def count_occurrences(d, name):
                count = 1 if name in d else 0
                for k in d:
                    if isinstance(d[k], dict):
                        count += count_occurrences(d[k], name)
                return count

            occurrences = count_occurrences(self.weights_dict, source_name)
if occurrences > 1:
raise ValueError(
f"Name '{source_name}' occurs more than once in the model; "
"try passing a complete path"
)
if occurrences == 0:
                raise ValueError(
                    f"Source name '{source_name}' does not appear in the "
                    "model. Use `editor.summary()` "
                    "to list all objects."
)
def _edit(d):
for k in d:
if isinstance(d[k], dict):
_edit(d[k])
if source_name in d:
edit_fn(d, source_name=source_name, target_name=target_name)
_edit(self.weights_dict)
def rename_object(self, object_name, new_name):
"""Rename an object in the file (e.g. a layer).
Args:
object_name: String, name or path of the
object to rename (e.g. `"dense_2"` or
`"layers/dense_2"`).
new_name: String, new name of the object.
"""
def rename_fn(weights_dict, source_name, target_name):
weights_dict[target_name] = weights_dict[source_name]
weights_dict.pop(source_name)
self._edit_object(rename_fn, object_name, new_name)
def delete_object(self, object_name):
"""Removes an object from the file (e.g. a layer).
Args:
object_name: String, name or path of the
object to delete (e.g. `"dense_2"` or
`"layers/dense_2"`).
"""
def delete_fn(weights_dict, source_name, target_name=None):
weights_dict.pop(source_name)
self._edit_object(delete_fn, object_name)
def add_object(self, object_path, weights):
"""Add a new object to the file (e.g. a layer).
Args:
object_path: String, full path of the
object to add (e.g. `"layers/dense_2"`).
weights: Dict mapping weight names to weight
values (arrays),
e.g. `{"0": kernel_value, "1": bias_value}`.
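        Example:

        ```python
        import numpy as np

        # Hypothetical kernel/bias shapes for a small dense layer.
        editor.add_object(
            "layers/dense_3",
            weights={"0": np.zeros((16, 4)), "1": np.zeros((4,))},
        )
        ```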
"""
if not isinstance(weights, dict):
raise ValueError(
"Argument `weights` should be a dict "
"where keys are weight names (usually '0', '1', etc.) "
"and values are NumPy arrays. "
f"Received: type(weights)={type(weights)}"
)
if "/" in object_path:
# It's a path
elements = object_path.split("/")
partial_path = "/".join(elements[:-1])
weights_dict = self.weights_dict
for e in elements[:-1]:
if e not in weights_dict:
raise ValueError(
f"Path '{partial_path}' not found in model."
)
weights_dict = weights_dict[e]
weights_dict[elements[-1]] = weights
else:
self.weights_dict[object_path] = weights
def delete_weight(self, object_name, weight_name):
"""Removes a weight from an existing object.
Args:
object_name: String, name or path of the
object from which to remove the weight
(e.g. `"dense_2"` or `"layers/dense_2"`).
weight_name: String, name of the weight to
delete (e.g. `"0"`).
"""
def delete_weight_fn(weights_dict, source_name, target_name=None):
if weight_name not in weights_dict[source_name]:
raise ValueError(
f"Weight {weight_name} not found "
f"in object {object_name}. "
"Weights found: "
f"{list(weights_dict[source_name].keys())}"
)
weights_dict[source_name].pop(weight_name)
self._edit_object(delete_weight_fn, object_name)
def add_weights(self, object_name, weights):
"""Add one or more new weights to an existing object.
Args:
object_name: String, name or path of the
object to add the weights to
(e.g. `"dense_2"` or `"layers/dense_2"`).
weights: Dict mapping weight names to weight
values (arrays),
e.g. `{"0": kernel_value, "1": bias_value}`.
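        Example:

        ```python
        import numpy as np

        # Hypothetical: add a bias vector to an existing layer entry.
        editor.add_weights("dense_2", {"1": np.zeros((4,))})
        ```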
"""
if not isinstance(weights, dict):
raise ValueError(
"Argument `weights` should be a dict "
"where keys are weight names (usually '0', '1', etc.) "
"and values are NumPy arrays. "
f"Received: type(weights)={type(weights)}"
)
def add_weight_fn(weights_dict, source_name, target_name=None):
weights_dict[source_name].update(weights)
self._edit_object(add_weight_fn, object_name)
def save(self, filepath):
"""Save the edited weights file.
Args:
filepath: Path to save the file to.
Must be a `.weights.h5` file.
"""
filepath = str(filepath)
if not filepath.endswith(".weights.h5"):
raise ValueError(
"Invalid `filepath` argument: "
"expected a `.weights.h5` extension. "
f"Received: filepath={filepath}"
)
weights_store = H5IOStore(filepath, mode="w")
def _save(weights_dict, weights_store, inner_path):
vars_to_create = {}
for name, value in weights_dict.items():
if isinstance(value, dict):
if value:
_save(
weights_dict[name],
weights_store,
inner_path=os.path.join(inner_path, name),
)
else:
# e.g. name="0", value=HDF5Dataset
vars_to_create[name] = value
if vars_to_create:
var_store = weights_store.make(inner_path)
for name, value in vars_to_create.items():
var_store[name] = value
_save(self.weights_dict, weights_store, inner_path="")
weights_store.close()
def resave_weights(self, filepath):
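        """Alias for `save()`."""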
self.save(filepath)
def _extract_weights_from_store(self, data, metadata=None, inner_path=""):
metadata = metadata or {}
# ------------------------------------------------------
# Collect metadata for this HDF5 group
# ------------------------------------------------------
object_metadata = {}
for k, v in data.attrs.items():
object_metadata[k] = v
if object_metadata:
metadata[inner_path] = object_metadata
result = collections.OrderedDict()
# ------------------------------------------------------
# Iterate over all keys in this HDF5 group
# ------------------------------------------------------
for key in data.keys():
# IMPORTANT:
# Never mutate inner_path; use local variable.
            # Use os.path.join so the paths recorded here match the lookups
            # done in `_print_weights_structure`.
            current_inner_path = os.path.join(inner_path, key)
value = data[key]
# ------------------------------------------------------
# CASE 1 — HDF5 GROUP → RECURSE
# ------------------------------------------------------
if isinstance(value, h5py.Group):
# Skip empty groups
if len(value) == 0:
continue
# Skip empty "vars" groups
if "vars" in value.keys() and len(value["vars"]) == 0:
continue
# Recurse into "vars" subgroup when present
if "vars" in value.keys():
result[key], metadata = self._extract_weights_from_store(
value["vars"],
metadata=metadata,
inner_path=current_inner_path,
)
else:
# Recurse normally
result[key], metadata = self._extract_weights_from_store(
value,
metadata=metadata,
inner_path=current_inner_path,
)
continue # finished processing this key
# ------------------------------------------------------
# CASE 2 — HDF5 DATASET → SAFE LOADING
# ------------------------------------------------------
# Skip any objects that are not proper datasets
if not hasattr(value, "shape") or not hasattr(value, "dtype"):
continue
shape = value.shape
dtype = value.dtype
# ------------------------------------------------------
# Validate SHAPE (avoid malformed / malicious metadata)
# ------------------------------------------------------
# No negative dimensions
if any(dim < 0 for dim in shape):
raise ValueError(
"Malformed HDF5 dataset shape encountered in .keras file; "
"negative dimension detected."
)
# Prevent absurdly high-rank tensors
if len(shape) > 64:
raise ValueError(
"Malformed HDF5 dataset shape encountered in .keras file; "
"tensor rank exceeds safety limit."
)
# Safe product computation (Python int is unbounded)
num_elems = int(np.prod(shape))
# ------------------------------------------------------
# Validate TOTAL memory size
# ------------------------------------------------------
MAX_BYTES = 1 << 32 # 4 GiB
size_bytes = num_elems * dtype.itemsize
if size_bytes > MAX_BYTES:
raise ValueError(
f"HDF5 dataset too large to load safely "
f"({size_bytes} bytes; limit is {MAX_BYTES})."
)
# ------------------------------------------------------
# SAFE — load dataset (guaranteed ≤ 4 GiB)
# ------------------------------------------------------
result[key] = value[()]
# ------------------------------------------------------
# Return final tree and metadata
# ------------------------------------------------------
return result, metadata
def _generate_filepath_info(self, rich_style=False):
if rich_style:
filepath = f"'{self.filepath}'"
filepath = f"{summary_utils.highlight_symbol(filepath)}"
else:
filepath = f"'{self.filepath}'"
return f"Keras model file {filepath}"
def _generate_config_info(self, rich_style=False):
return pprint.pformat(self.config)
def _generate_metadata_info(self, rich_style=False):
version = self.metadata["keras_version"]
date = self.metadata["date_saved"]
if rich_style:
version = f"{summary_utils.highlight_symbol(version)}"
date = f"{summary_utils.highlight_symbol(date)}"
return f"Saved with Keras {version} - date: {date}"
def _print_weights_structure(
self, weights_dict, indent=0, is_first=True, prefix="", inner_path=""
):
for idx, (key, value) in enumerate(weights_dict.items()):
            # Build a per-key path; do not mutate `inner_path` across
            # siblings, otherwise later keys inherit earlier keys' paths.
            current_inner_path = os.path.join(inner_path, key)
            is_last = idx == len(weights_dict) - 1
            if is_first:
                is_first = False
                connector = "> "
            elif is_last:
                connector = "└─ "
            else:
                connector = "├─ "
            if isinstance(value, dict):
                bold_key = summary_utils.bold_text(key)
                object_label = f"{prefix}{connector}{bold_key}"
                if current_inner_path in self.object_metadata:
                    metadata = self.object_metadata[current_inner_path]
                    if "name" in metadata:
                        name = metadata["name"]
                        object_label += f" ('{name}')"
                self.console.print(object_label)
                if is_last:
                    appended = " "
                else:
                    appended = "│ "
                new_prefix = prefix + appended
                self._print_weights_structure(
                    value,
                    indent + 1,
                    is_first=is_first,
                    prefix=new_prefix,
                    inner_path=current_inner_path,
)
else:
if hasattr(value, "shape"):
bold_key = summary_utils.bold_text(key)
self.console.print(
f"{prefix}{connector}{bold_key}:"
+ f" shape={value.shape}, dtype={value.dtype}"
)
else:
self.console.print(f"{prefix}{connector}{key}: {value}")
def _weights_summary_cli(self):
self.console.print("Weights structure")
self._print_weights_structure(self.weights_dict, prefix=" " * 2)
def _weights_summary_interactive(self):
def _generate_html_weights(dictionary, margin_left=0, font_size=1):
html = ""
for key, value in dictionary.items():
if isinstance(value, dict) and value:
weights_html = _generate_html_weights(
value, margin_left + 20, font_size - 1
)
html += (
f'<details style="margin-left: {margin_left}px;">'
'<summary style="'
f"font-size: {font_size}em; "
"font-weight: bold;"
f'">{key}</summary>'
f"{weights_html}"
"</details>"
)
else:
html += (
f'<details style="margin-left: {margin_left}px;">'
f'<summary style="font-size: {font_size}em;">'
f"{key} : shape={value.shape}"
f", dtype={value.dtype}</summary>"
f"<div style="
f'"margin-left: {margin_left}px;'
f'"margin-top: {margin_left}px;">'
f"{display_weight(value)}"
"</div>"
"</details>"
)
return html
output = "Weights structure"
initialize_id_counter()
output += _generate_html_weights(self.weights_dict)
ipython.display.display(ipython.display.HTML(output))
def get_weight_spec_of_saveable(saveable, spec, visited_saveables=None):
from keras.src.saving.keras_saveable import KerasSaveable
visited_saveables = visited_saveables or set()
    # If the saveable has already been visited, skip it.
if id(saveable) in visited_saveables:
return
if hasattr(saveable, "save_own_variables"):
store = {}
saveable.save_own_variables(store)
if store:
keys = sorted(store.keys())
for k in keys:
val = store[k]
spec[k] = backend.KerasTensor(shape=val.shape, dtype=val.dtype)
visited_saveables.add(id(saveable))
for child_attr, child_obj in saving_lib._walk_saveable(saveable):
if isinstance(child_obj, KerasSaveable):
sub_spec = {}
get_weight_spec_of_saveable(
child_obj,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[child_attr] = sub_spec
elif isinstance(child_obj, (list, dict, tuple, set)):
sub_spec = {}
get_weight_spec_of_container(
child_obj,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[child_attr] = sub_spec
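# Usage sketch: build a nested spec of weight shapes for a built model.
#
#   spec = {}
#   get_weight_spec_of_saveable(model, spec)  # `model` is any built model
#   # `spec` now mirrors the weights tree, with `KerasTensor` leaves carrying
#   # only shape and dtype.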
def get_weight_spec_of_container(container, spec, visited_saveables):
from keras.src.saving.keras_saveable import KerasSaveable
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for saveable in container:
if isinstance(saveable, KerasSaveable):
name = naming.to_snake_case(saveable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
sub_spec = {}
get_weight_spec_of_saveable(
saveable,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[name] = sub_spec
def initialize_id_counter():
global div_id_counter
div_id_counter = 0
def increment_id_counter():
global div_id_counter
div_id_counter += 1
def get_id_counter():
return div_id_counter
def display_weight(weight, axis=-1, threshold=16):
def _find_factors_closest_to_sqrt(num):
sqrt_num = int(np.sqrt(num))
for i in range(sqrt_num, 0, -1):
if num % i == 0:
M = i
N = num // i
if M > N:
return N, M
return M, N
    def _color_from_rgb(value):
return f"rgba({value[0]}, {value[1]}, {value[2]}, 1)"
def _reduce_3d_array_by_mean(arr, n, axis):
if axis == 2:
trimmed_arr = arr[:, :, : arr.shape[2] - (arr.shape[2] % n)]
reshaped = np.reshape(
trimmed_arr, (arr.shape[0], arr.shape[1], -1, n)
)
mean_values = np.mean(reshaped, axis=3)
elif axis == 1:
trimmed_arr = arr[:, : arr.shape[1] - (arr.shape[1] % n), :]
reshaped = np.reshape(
trimmed_arr, (arr.shape[0], -1, n, arr.shape[2])
)
mean_values = np.mean(reshaped, axis=2)
elif axis == 0:
trimmed_arr = arr[: arr.shape[0] - (arr.shape[0] % n), :, :]
reshaped = np.reshape(
trimmed_arr, (-1, n, arr.shape[1], arr.shape[2])
)
mean_values = np.mean(reshaped, axis=1)
else:
raise ValueError("Axis must be 0, 1, or 2.")
return mean_values
def _create_matrix_html(matrix, subplot_size=840):
rows, cols, num_slices = matrix.shape
M, N = _find_factors_closest_to_sqrt(num_slices)
try:
from matplotlib import cm
except ImportError:
cm = None
if cm:
rgb_matrix = cm.jet(matrix)
else:
rgb_matrix = (matrix - np.min(matrix)) / (
np.max(matrix) - np.min(matrix)
)
rgb_matrix = np.stack([rgb_matrix, rgb_matrix, rgb_matrix], axis=-1)
rgb_matrix = (rgb_matrix[..., :3] * 255).astype("uint8")
subplot_html = ""
for i in range(num_slices):
cell_html = ""
for row in rgb_matrix[..., i, :]:
for rgb in row:
                    color = _color_from_rgb(rgb)
cell_html += (
f'<div class="cell" '
f'style="background-color: {color};">'
f"</div>"
)
subplot_html += f"""
<div class="matrix">
{cell_html}
</div>
"""
cell_size = subplot_size // (N * cols)
increment_id_counter()
div_id = get_id_counter()
html_code = f"""
<div class="unique-container_{div_id}">
<style>
.unique-container_{div_id} .subplots {{
display: inline-grid;
grid-template-columns: repeat({N}, 1fr);
column-gap: 5px; /* Minimal horizontal gap */
row-gap: 5px; /* Small vertical gap */
margin: 0;
padding: 0;
}}
.unique-container_{div_id} .matrix {{
display: inline-grid;
grid-template-columns: repeat({cols}, {cell_size}px);
grid-template-rows: repeat({rows}, {cell_size}px);
gap: 1px;
margin: 0;
padding: 0;
}}
.unique-container_{div_id} .cell {{
width: {cell_size}px;
height: {cell_size}px;
display: flex;
justify-content: center;
align-items: center;
font-size: 5px;
font-weight: bold;
color: white;
}}
.unique-container_{div_id} {{
margin-top: 20px;
margin-bottom: 20px;
}}
</style>
<div class="subplots">
{subplot_html}
</div>
</div>
"""
return html_code
if weight.ndim == 1:
weight = weight[..., np.newaxis]
weight = np.swapaxes(weight, axis, -1)
weight = weight.reshape(-1, weight.shape[-1])
M, N = _find_factors_closest_to_sqrt(weight.shape[0])
weight = weight.reshape(M, N, weight.shape[-1])
for reduce_axis in [0, 1, 2]:
if weight.shape[reduce_axis] > threshold:
weight = _reduce_3d_array_by_mean(
weight,
weight.shape[reduce_axis] // threshold,
axis=reduce_axis,
)
weight = (weight - weight.min()) / (weight.max() - weight.min() + 1e-5)
html_code = _create_matrix_html(weight)
return html_code
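# Usage sketch (notebook-oriented; rendering requires IPython):
#
#   import numpy as np
#   html = display_weight(np.random.rand(32, 16))
#   # ipython.display.display(ipython.display.HTML(html))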
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/saving_lib.py | keras/src/saving/saving_lib.py | """Python-based idempotent model-saving functionality."""
import datetime
import io
import json
import math
import os
import pathlib
import shutil
import tempfile
import warnings
import zipfile
import ml_dtypes
import numpy as np
from keras.src import backend
from keras.src.backend.common import global_state
from keras.src.saving.serialization_lib import ObjectSharingScope
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.utils import dtype_utils
from keras.src.utils import file_utils
from keras.src.utils import io_utils
from keras.src.utils import naming
from keras.src.utils import plot_model
from keras.src.utils.model_visualization import check_pydot
from keras.src.utils.summary_utils import readable_memory_size
from keras.src.utils.summary_utils import weight_memory_size
from keras.src.version import __version__ as keras_version
try:
import h5py
except ImportError:
h5py = None
try:
import psutil
except ImportError:
psutil = None
try:
import huggingface_hub
except ImportError:
huggingface_hub = None
_CONFIG_FILENAME = "config.json"
_METADATA_FILENAME = "metadata.json"
_VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5"
_VARS_FNAME_H5 = f"{_VARS_FNAME}.h5"
_VARS_FNAME_NPZ = f"{_VARS_FNAME}.npz"
_ASSETS_DIRNAME = "assets"
_MEMORY_UPPER_BOUND = 0.5 # 50%
_MODEL_CARD_TEMPLATE = """
---
library_name: keras
---
This model has been uploaded using the Keras library and can be used with JAX,
TensorFlow, and PyTorch backends.
This model card has been generated automatically and should be completed by the
model author.
See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for
more information.
For more details about the model architecture, check out
[config.json](./config.json)."""
def save_model(model, filepath, weights_format="h5", zipped=True):
"""Save a zip-archive representing a Keras model to the given file or path.
The zip-based archive contains the following structure:
- JSON-based configuration file (config.json): Records of model, layer, and
other saveables' configuration.
    - H5-based saveable state file (`model.weights.h5`), or an NPZ-based
      state file (`model.weights.npz`) when `weights_format="npz"`.
- Metadata file.
The states of Keras saveables (layers, optimizers, loss, and metrics) are
automatically saved as long as they can be discovered through the attributes
returned by `dir(Model)`. Typically, the state includes the variables
associated with the saveable, but some specially purposed layers may
contain more such as the vocabularies stored in the hashmaps. The saveables
    define how their states are saved by exposing `save_own_variables()` and
    `load_own_variables()` APIs.
For the case of layer states, the variables will be visited as long as
they are either 1) referenced via layer attributes, or 2) referenced via a
container (list, tuple, or dict), and the container is referenced via a
layer attribute.
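    Example:

    ```python
    # Sketch of the supported destinations ("username/model" is hypothetical):
    save_model(model, "model.keras")                        # zipped archive
    save_model(model, "model_dir", zipped=False)            # plain directory
    save_model(model, "hf://username/model", zipped=False)  # Hugging Face Hub
    ```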
"""
if weights_format == "h5" and h5py is None:
raise ImportError("h5py must be installed in order to save a model.")
if not model.built:
warnings.warn(
"You are saving a model that has not yet been built. "
"It might not contain any weights yet. "
"Consider building the model first by calling it "
"on some data.",
stacklevel=2,
)
if isinstance(filepath, io.IOBase):
_save_model_to_fileobj(model, filepath, weights_format)
return
filepath = str(filepath)
is_hf = filepath.startswith("hf://")
if zipped and not filepath.endswith(".keras"):
raise ValueError(
"Invalid `filepath` argument: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
if not zipped and filepath.endswith(".keras"):
raise ValueError(
"When using `zipped=False`, the `filepath` argument should not "
f"end in `.keras`. Received: filepath={filepath}"
)
if zipped and is_hf:
raise ValueError(
"When saving to the Hugging Face Hub, you should not save the "
f"model as zipped. Received: filepath={filepath}, zipped={zipped}"
)
if is_hf:
_upload_model_to_hf(model, filepath, weights_format)
elif not zipped:
_save_model_to_dir(model, filepath, weights_format)
else:
if file_utils.is_remote_path(filepath):
# Remote path. Zip to local memory byte io and copy to remote
zip_filepath = io.BytesIO()
_save_model_to_fileobj(model, zip_filepath, weights_format)
with file_utils.File(filepath, "wb") as f:
f.write(zip_filepath.getvalue())
else:
with open(filepath, "wb") as f:
_save_model_to_fileobj(model, f, weights_format)
def _serialize_model_as_json(model):
with ObjectSharingScope():
serialized_model_dict = serialize_keras_object(model)
config_json = json.dumps(serialized_model_dict)
metadata_json = json.dumps(
{
"keras_version": keras_version,
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
)
return config_json, metadata_json
def _save_model_to_dir(model, dirpath, weights_format):
if not file_utils.exists(dirpath):
file_utils.makedirs(dirpath)
config_json, metadata_json = _serialize_model_as_json(model)
with open(file_utils.join(dirpath, _METADATA_FILENAME), "w") as f:
f.write(metadata_json)
with open(file_utils.join(dirpath, _CONFIG_FILENAME), "w") as f:
f.write(config_json)
weights_filepath = file_utils.join(dirpath, _VARS_FNAME_H5)
    asset_dirpath = file_utils.join(dirpath, _ASSETS_DIRNAME)
    weights_store = None
    asset_store = None
    try:
if weights_format == "h5":
weights_store = H5IOStore(weights_filepath, mode="w")
elif weights_format == "npz":
weights_store = NpzIOStore(weights_filepath, mode="w")
else:
raise ValueError(
"Unknown `weights_format` argument. "
"Expected 'h5' or 'npz'. "
f"Received: weights_format={weights_format}"
)
        asset_store = DiskIOStore(asset_dirpath, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_saveables=set(),
)
    finally:
        if weights_store:
            weights_store.close()
        if asset_store:
            asset_store.close()
def _save_model_to_fileobj(model, fileobj, weights_format):
config_json, metadata_json = _serialize_model_as_json(model)
with zipfile.ZipFile(fileobj, "w") as zf:
with zf.open(_METADATA_FILENAME, "w") as f:
f.write(metadata_json.encode())
with zf.open(_CONFIG_FILENAME, "w") as f:
f.write(config_json.encode())
weights_file_path = None
weights_store = None
asset_store = None
write_zf = False
try:
if weights_format == "h5":
try:
if is_memory_sufficient(model):
# Load the model weights into memory before writing
# .keras if the system memory is sufficient.
weights_store = H5IOStore(
_VARS_FNAME_H5, archive=zf, mode="w"
)
else:
# Try opening the .h5 file, then writing it to `zf` at
# the end of the function call. This is more memory
# efficient than writing the weights into memory first.
working_dir = pathlib.Path(fileobj.name).parent
weights_file_path = tempfile.NamedTemporaryFile(
dir=working_dir
)
weights_store = H5IOStore(
weights_file_path.name, mode="w"
)
write_zf = True
                except Exception:
# If we can't use the local disk for any reason, write the
# weights into memory first, which consumes more memory.
weights_store = H5IOStore(
_VARS_FNAME_H5, archive=zf, mode="w"
)
elif weights_format == "npz":
weights_store = NpzIOStore(
_VARS_FNAME_NPZ, archive=zf, mode="w"
)
else:
raise ValueError(
"Unknown `weights_format` argument. "
"Expected 'h5' or 'npz'. "
f"Received: weights_format={weights_format}"
)
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_saveables=set(),
)
except:
# Skip the final `zf.write` if any exception is raised
write_zf = False
if weights_store:
weights_store.archive = None
raise
finally:
if weights_store:
weights_store.close()
if asset_store:
asset_store.close()
if write_zf and weights_file_path:
zf.write(weights_file_path.name, _VARS_FNAME_H5)
if weights_file_path:
weights_file_path.close()
def _upload_model_to_hf(model, hf_path, weights_format):
if huggingface_hub is None:
raise ImportError(
"To save models to the Hugging Face Hub, "
"you must install the `huggingface_hub` package."
)
original_hf_path = hf_path
if hf_path.startswith("hf://"):
hf_path = hf_path[5:]
if hf_path.count("/") > 1:
raise ValueError(
"Invalid `hf_path` argument: expected `namespace/model_name`"
f" format. Received: hf_path={original_hf_path}"
)
api = huggingface_hub.HfApi(
library_name="keras", library_version=keras_version
)
repo_url = api.create_repo(hf_path, exist_ok=True)
repo_id = repo_url.repo_id
with tempfile.TemporaryDirectory() as tmp_dir:
_save_model_to_dir(model, tmp_dir, weights_format)
model_card = _MODEL_CARD_TEMPLATE
if check_pydot():
plot_path = file_utils.join(tmp_dir, "assets", "summary_plot.png")
plot_model(
model,
to_file=plot_path,
show_layer_names=True,
show_shapes=True,
show_dtype=True,
)
            if len(model.layers) <= 10:
                model_card += "\n\n![](./assets/summary_plot.png)"
            else:
                model_card += (
                    "\n\nA plot of the model can be found "
                    "[here](./assets/summary_plot.png)."
                )
with open(file_utils.join(tmp_dir, "README.md"), "w") as f:
f.write(model_card)
api.upload_folder(
repo_id=repo_id,
folder_path=tmp_dir,
commit_message="Save model using Keras.",
)
io_utils.print_msg(
f"Model saved to the Hugging Face Hub: {repo_url}\n"
"To load back the model, use "
f"`keras.saving.load_model('hf://{repo_id}')`"
)
def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
"""Load a zip archive representing a Keras model."""
if isinstance(filepath, io.IOBase):
return _load_model_from_fileobj(
filepath, custom_objects, compile, safe_mode
)
elif str(filepath).startswith("hf://"):
if huggingface_hub is None:
raise ImportError(
"To load models from the Hugging Face Hub, "
"you must install the `huggingface_hub` package."
)
repo_id = filepath[5:]
folder_path = huggingface_hub.snapshot_download(
repo_id=repo_id,
library_name="keras",
library_version=keras_version,
)
return _load_model_from_dir(
folder_path, custom_objects, compile, safe_mode
)
else:
filepath = str(filepath)
if not filepath.endswith(".keras"):
is_keras_dir = file_utils.isdir(filepath) and file_utils.exists(
file_utils.join(filepath, "config.json")
)
if is_keras_dir:
return _load_model_from_dir(
filepath, custom_objects, compile, safe_mode
)
raise ValueError(
"Invalid filename: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
with open(filepath, "rb") as f:
return _load_model_from_fileobj(
f, custom_objects, compile, safe_mode
)
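# Usage sketch, mirroring `save_model` ("username/model" is hypothetical):
#
#   model = load_model("model.keras")          # zip archive
#   model = load_model("model_dir")            # directory containing config.json
#   model = load_model("hf://username/model")  # Hugging Face Hub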
def _load_model_from_dir(dirpath, custom_objects, compile, safe_mode):
if not file_utils.exists(dirpath):
raise ValueError(f"Directory doesn't exist: {dirpath}")
if not file_utils.isdir(dirpath):
raise ValueError(f"Path isn't a directory: {dirpath}")
with open(file_utils.join(dirpath, _CONFIG_FILENAME), "r") as f:
config_json = f.read()
model = _model_from_config(config_json, custom_objects, compile, safe_mode)
all_filenames = file_utils.listdir(dirpath)
    weights_store = None
    asset_store = None
    try:
if _VARS_FNAME_H5 in all_filenames:
weights_file_path = file_utils.join(dirpath, _VARS_FNAME_H5)
weights_store = H5IOStore(weights_file_path, mode="r")
elif _VARS_FNAME_NPZ in all_filenames:
weights_file_path = file_utils.join(dirpath, _VARS_FNAME_NPZ)
weights_store = NpzIOStore(weights_file_path, mode="r")
else:
raise ValueError(
f"Expected a {_VARS_FNAME_H5} or {_VARS_FNAME_NPZ} file."
)
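        # Beyond config.json, metadata.json, and the weights file, any extra
        # entries in the directory are assets.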
if len(all_filenames) > 3:
asset_store = DiskIOStore(
file_utils.join(dirpath, _ASSETS_DIRNAME), mode="r"
)
else:
asset_store = None
failed_saveables = set()
error_msgs = {}
_load_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_saveables=set(),
failed_saveables=failed_saveables,
error_msgs=error_msgs,
)
    finally:
        if weights_store:
            weights_store.close()
        if asset_store:
            asset_store.close()
if failed_saveables:
_raise_loading_failure(error_msgs)
return model
def _model_from_config(config_json, custom_objects, compile, safe_mode):
# Note: we should NOT use a custom JSON decoder. Anything that
# needs custom decoding must be handled in deserialize_keras_object.
config_dict = json.loads(config_json)
if not compile:
# Disable compilation
config_dict["compile_config"] = None
# Construct the model from the configuration file in the archive.
with ObjectSharingScope():
model = deserialize_keras_object(
config_dict, custom_objects, safe_mode=safe_mode
)
return model
def _load_model_from_fileobj(fileobj, custom_objects, compile, safe_mode):
with zipfile.ZipFile(fileobj, "r") as zf:
with zf.open(_CONFIG_FILENAME, "r") as f:
config_json = f.read()
model = _model_from_config(
config_json, custom_objects, compile, safe_mode
)
all_filenames = zf.namelist()
extract_dir = None
weights_store = None
asset_store = None
try:
if _VARS_FNAME_H5 in all_filenames:
try:
if is_memory_sufficient(model):
# Load the entire file into memory if the system memory
# is sufficient.
io_file = io.BytesIO(
zf.open(_VARS_FNAME_H5, "r").read()
)
weights_store = H5IOStore(io_file, mode="r")
else:
# Try extracting the model.weights.h5 file, and then
                    # loading it with h5py. This is significantly
# faster than reading from the zip archive on the fly.
extract_dir = tempfile.TemporaryDirectory(
dir=pathlib.Path(fileobj.name).parent
)
zf.extract(_VARS_FNAME_H5, extract_dir.name)
weights_store = H5IOStore(
pathlib.Path(extract_dir.name, _VARS_FNAME_H5),
mode="r",
)
                except Exception:
# If we can't use the local disk for any reason, read the
# weights from the zip archive on the fly, which is less
# efficient.
weights_store = H5IOStore(_VARS_FNAME_H5, zf, mode="r")
elif _VARS_FNAME_NPZ in all_filenames:
weights_store = NpzIOStore(_VARS_FNAME_NPZ, zf, mode="r")
else:
raise ValueError(
f"Expected a {_VARS_FNAME_H5} or {_VARS_FNAME_NPZ} file."
)
if len(all_filenames) > 3:
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="r")
failed_saveables = set()
error_msgs = {}
_load_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_saveables=set(),
failed_saveables=failed_saveables,
error_msgs=error_msgs,
)
finally:
if weights_store:
weights_store.close()
if asset_store:
asset_store.close()
if extract_dir:
extract_dir.cleanup()
if failed_saveables:
_raise_loading_failure(error_msgs)
return model
def save_weights_only(
model, filepath, max_shard_size=None, objects_to_skip=None
):
"""Save only the weights of a model to a target filepath.
    The filepath must end in `.weights.h5`; when `max_shard_size` is given,
    `.weights.json` is also accepted (sharded saving).
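
    Example:

    ```python
    # Sketch: plain vs. sharded saving (the shard size value is hypothetical).
    save_weights_only(model, "model.weights.h5")
    save_weights_only(model, "model.weights.json", max_shard_size=1)
    ```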
"""
if not model.built:
raise ValueError(
"You are saving a model that has not yet been built. "
"Try building the model first by calling it on some data or "
"by using `build()`."
)
filepath_str = str(filepath)
tmp_dir = None
remote_filepath = None
if max_shard_size is None and not filepath_str.endswith(".weights.h5"):
raise ValueError(
"The filename must end in `.weights.h5`. "
f"Received: filepath={filepath_str}"
)
elif max_shard_size is not None and not filepath_str.endswith(
("weights.h5", "weights.json")
):
        raise ValueError(
            "The filename must end in `.weights.h5` or `.weights.json` when "
            f"`max_shard_size` is specified. Received: filepath={filepath_str}"
)
try:
if file_utils.is_remote_path(filepath):
tmp_dir = get_temp_dir()
local_filepath = os.path.join(tmp_dir, os.path.basename(filepath))
remote_filepath = filepath
filepath = local_filepath
if max_shard_size is not None:
weights_store = ShardedH5IOStore(filepath, max_shard_size, mode="w")
else:
weights_store = H5IOStore(filepath, mode="w")
if objects_to_skip is not None:
visited_saveables = set(id(o) for o in objects_to_skip)
else:
visited_saveables = set()
_save_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
visited_saveables=visited_saveables,
)
weights_store.close()
finally:
if tmp_dir is not None:
file_utils.copy(filepath, remote_filepath)
shutil.rmtree(tmp_dir)
def load_weights_only(
model, filepath, skip_mismatch=False, objects_to_skip=None
):
"""Load the weights of a model from a filepath (.keras or .weights.h5).
    Sharded `.weights.json` files are also supported.
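    Example (a minimal sketch; assumes `keras` is imported):
    ```python
    model = keras.Sequential([keras.Input((4,)), keras.layers.Dense(1)])
    save_weights_only(model, "model.weights.h5")
    clone = keras.Sequential([keras.Input((4,)), keras.layers.Dense(1)])
    load_weights_only(clone, "model.weights.h5")
    ```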
"""
if not model.built:
raise ValueError(
"You are loading weights into a model that has not yet been built. "
"Try building the model first by calling it on some data or "
"by using `build()`."
)
archive = None
tmp_dir = None
filepath_str = str(filepath)
try:
if file_utils.is_remote_path(filepath_str):
tmp_dir = get_temp_dir()
local_filepath = os.path.join(
tmp_dir, os.path.basename(filepath_str)
)
file_utils.copy(filepath_str, local_filepath)
filepath_str = filepath = local_filepath
if filepath_str.endswith("weights.h5"):
weights_store = H5IOStore(filepath, mode="r")
elif filepath_str.endswith("weights.json"):
weights_store = ShardedH5IOStore(filepath, mode="r")
elif filepath_str.endswith(".keras"):
archive = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(_VARS_FNAME_H5, archive=archive, mode="r")
failed_saveables = set()
if objects_to_skip is not None:
visited_saveables = set(id(o) for o in objects_to_skip)
else:
visited_saveables = set()
error_msgs = {}
_load_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
skip_mismatch=skip_mismatch,
visited_saveables=visited_saveables,
failed_saveables=failed_saveables,
error_msgs=error_msgs,
)
weights_store.close()
if archive:
archive.close()
if failed_saveables:
_raise_loading_failure(error_msgs, warn_only=skip_mismatch)
finally:
if tmp_dir is not None:
shutil.rmtree(tmp_dir)
def _raise_loading_failure(error_msgs, warn_only=False):
first_key = list(error_msgs.keys())[0]
ex_saveable, ex_error = error_msgs[first_key]
msg = (
f"A total of {len(error_msgs)} objects could not "
"be loaded. Example error message for "
f"object {ex_saveable}:\n\n"
f"{ex_error}\n\n"
"List of objects that could not be loaded:\n"
f"{[x[0] for x in error_msgs.values()]}"
)
if warn_only:
warnings.warn(msg)
else:
raise ValueError(msg)
def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path):
if not file_utils.isdir(system_path):
zipfile_to_save.write(system_path, zip_path)
else:
for file_name in file_utils.listdir(system_path):
system_file_path = file_utils.join(system_path, file_name).replace(
"\\", "/"
)
zip_file_path = file_utils.join(zip_path, file_name).replace(
"\\", "/"
)
_write_to_zip_recursively(
zipfile_to_save, system_file_path, zip_file_path
)
def _name_key(name):
"""Make sure that private attributes are visited last."""
if name.startswith("_"):
return f"~{name}"
return name
def _walk_saveable(saveable):
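    """Yield `(attr_name, child_obj)` pairs for the children of `saveable`."""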
from keras.src.saving.keras_saveable import KerasSaveable
if not isinstance(saveable, KerasSaveable):
raise ValueError(
"Expected object to be an "
"instance of `KerasSaveable`, but "
f"got {saveable} of type {type(saveable)}"
)
obj_type = saveable._obj_type()
attr_skipset = get_attr_skipset(obj_type)
# Save all layers directly tracked by Sequential and Functional first.
# This helps avoid ordering concerns for subclassed Sequential or Functional
    # models with extra attributes--the internal Keras state takes precedence.
if obj_type in ("Sequential", "Functional"):
yield "layers", saveable.layers
for child_attr in sorted(dir(saveable), key=lambda x: _name_key(x)):
if child_attr.startswith("__") or child_attr in attr_skipset:
continue
try:
child_obj = getattr(saveable, child_attr)
except Exception:
# Avoid raising the exception when visiting the attributes.
continue
yield child_attr, child_obj
def _save_state(
saveable,
weights_store,
assets_store,
inner_path,
visited_saveables,
):
from keras.src.saving.keras_saveable import KerasSaveable
if not isinstance(weights_store, (H5IOStore, ShardedH5IOStore, NpzIOStore)):
raise ValueError(
"Expected `weights_store` to be an instance of "
"`H5IOStore`, `ShardedH5IOStore` or `NpzIOStore`. "
f"Received: {weights_store} of type {type(weights_store)}"
)
if not isinstance(assets_store, (DiskIOStore, type(None))):
raise ValueError(
"Expected `assets_store` to be an instance of "
"`DiskIOStore` or `None`. "
f"Received: {assets_store} of type {type(assets_store)}"
)
# If the saveable has already been saved, skip it.
if id(saveable) in visited_saveables:
return
if hasattr(saveable, "save_own_variables") and weights_store:
if hasattr(saveable, "name") and isinstance(saveable.name, str):
metadata = {"name": saveable.name}
else:
metadata = None
saveable.save_own_variables(
weights_store.make(inner_path, metadata=metadata)
)
if hasattr(saveable, "save_assets") and assets_store:
saveable.save_assets(assets_store.make(inner_path))
visited_saveables.add(id(saveable))
# Recursively save state of children saveables (layers, optimizers, etc.)
for child_attr, child_obj in _walk_saveable(saveable):
if isinstance(child_obj, KerasSaveable):
_save_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
visited_saveables=visited_saveables,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_save_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
visited_saveables=visited_saveables,
)
def _load_state(
saveable,
weights_store,
assets_store,
inner_path,
skip_mismatch=False,
visited_saveables=None,
failed_saveables=None,
error_msgs=None,
):
from keras.src.saving.keras_saveable import KerasSaveable
if not isinstance(weights_store, (H5IOStore, ShardedH5IOStore, NpzIOStore)):
raise ValueError(
"Expected `weights_store` to be an instance of "
"`H5IOStore`, `ShardedH5IOStore` or `NpzIOStore`. "
f"Received: {weights_store} of type {type(weights_store)}"
)
if not isinstance(assets_store, (DiskIOStore, type(None))):
raise ValueError(
"Expected `assets_store` to be an instance of "
"`DiskIOStore` or `None`. "
f"Received: {assets_store} of type {type(assets_store)}"
)
if visited_saveables and id(saveable) in visited_saveables:
return
failure = False
if hasattr(saveable, "load_own_variables") and weights_store:
if skip_mismatch or failed_saveables is not None:
try:
saveable.load_own_variables(weights_store.get(inner_path))
except Exception as e:
failed_saveables.add(id(saveable))
error_msgs[id(saveable)] = saveable, e
failure = True
else:
saveable.load_own_variables(weights_store.get(inner_path))
if hasattr(saveable, "load_assets") and assets_store:
if skip_mismatch or failed_saveables is not None:
try:
saveable.load_assets(assets_store.get(inner_path))
except Exception as e:
failed_saveables.add(id(saveable))
error_msgs[id(saveable)] = saveable, e
failure = True
else:
saveable.load_assets(assets_store.get(inner_path))
if failed_saveables is not None:
currently_failed = len(failed_saveables)
else:
currently_failed = 0
# Recursively load states for Keras saveables such as layers/optimizers.
for child_attr, child_obj in _walk_saveable(saveable):
if isinstance(child_obj, KerasSaveable):
_load_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
skip_mismatch=skip_mismatch,
visited_saveables=visited_saveables,
failed_saveables=failed_saveables,
error_msgs=error_msgs,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_load_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
skip_mismatch=skip_mismatch,
visited_saveables=visited_saveables,
failed_saveables=failed_saveables,
error_msgs=error_msgs,
)
if failed_saveables is not None:
newly_failed = len(failed_saveables) - currently_failed
else:
newly_failed = 0
if not failure:
if visited_saveables is not None and newly_failed <= 0:
visited_saveables.add(id(saveable))
        if failed_saveables is not None and id(saveable) in failed_saveables:
            failed_saveables.remove(id(saveable))
            error_msgs.pop(id(saveable))
def _save_container_state(
container, weights_store, assets_store, inner_path, visited_saveables
):
from keras.src.saving.keras_saveable import KerasSaveable
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for saveable in container:
if isinstance(saveable, KerasSaveable):
# Do NOT address the saveable via `saveable.name`, since
# names are usually autogenerated and thus not reproducible
# (i.e. they may vary across two instances of the same model).
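            # e.g. a list of two `Dense` layers is saved under the inner
            # paths "dense" and "dense_1".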
name = naming.to_snake_case(saveable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_save_state(
saveable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name).replace("\\", "/"),
visited_saveables=visited_saveables,
)
def _load_container_state(
container,
weights_store,
assets_store,
inner_path,
skip_mismatch,
visited_saveables,
failed_saveables,
error_msgs,
):
from keras.src.saving.keras_saveable import KerasSaveable
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for saveable in container:
if isinstance(saveable, KerasSaveable):
name = naming.to_snake_case(saveable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_load_state(
saveable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name).replace("\\", "/"),
skip_mismatch=skip_mismatch,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/serialization_lib_test.py | keras/src/saving/serialization_lib_test.py | """Tests for serialization_lib."""
import json
import numpy as np
import pytest
import keras
from keras.src import ops
from keras.src import testing
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
def custom_fn(x):
return x**2
class CustomLayer(keras.layers.Layer):
def __init__(self, factor):
super().__init__()
self.factor = factor
def call(self, x):
return x * self.factor
def get_config(self):
return {"factor": self.factor}
class NestedCustomLayer(keras.layers.Layer):
def __init__(self, factor, dense=None, activation=None):
super().__init__()
self.factor = factor
if dense is None:
self.dense = keras.layers.Dense(1, activation=custom_fn)
else:
self.dense = serialization_lib.deserialize_keras_object(dense)
self.activation = serialization_lib.deserialize_keras_object(activation)
def call(self, x):
return self.dense(x * self.factor)
def get_config(self):
return {
"factor": self.factor,
"dense": self.dense,
"activation": self.activation,
}
class WrapperLayer(keras.layers.Wrapper):
def call(self, x):
return self.layer(x)
class SerializationLibTest(testing.TestCase):
def roundtrip(self, obj, custom_objects=None, safe_mode=True):
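        """Serialize `obj`, round-trip it through JSON, and deserialize it.
        Returns `(serialized, deserialized, reserialized)` so tests can
        assert serialization stability.
        """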
serialized = serialization_lib.serialize_keras_object(obj)
json_data = json.dumps(serialized)
json_data = json.loads(json_data)
deserialized = serialization_lib.deserialize_keras_object(
json_data, custom_objects=custom_objects, safe_mode=safe_mode
)
reserialized = serialization_lib.serialize_keras_object(deserialized)
return serialized, deserialized, reserialized
def test_simple_objects(self):
for obj in [
"hello",
b"hello",
np.array([0, 1]),
np.array([0.0, 1.0]),
np.float32(1.0),
["hello", 0, "world", 1.0, True],
{"1": "hello", "2": 0, "3": True},
{"1": "hello", "2": [True, False]},
slice(None, 20, 1),
slice(None, np.array([0, 1]), 1),
]:
serialized, _, reserialized = self.roundtrip(obj)
self.assertEqual(serialized, reserialized)
def test_builtin_layers(self):
layer = keras.layers.Dense(
3,
name="foo",
trainable=False,
dtype="float16",
)
serialized, restored, reserialized = self.roundtrip(layer)
self.assertEqual(serialized, reserialized)
self.assertEqual(layer.name, restored.name)
self.assertEqual(layer.trainable, restored.trainable)
self.assertEqual(layer.compute_dtype, restored.compute_dtype)
def test_numpy_get_item_layer(self):
def tuples_to_lists_str(x):
return str(x).replace("(", "[").replace(")", "]")
input = keras.layers.Input(shape=(2,))
layer = input[:, 1]
model = keras.Model(input, layer)
serialized, _, reserialized = self.roundtrip(model)
# Anticipate JSON roundtrip mapping tuples to lists:
serialized_str = tuples_to_lists_str(serialized)
reserialized_str = tuples_to_lists_str(reserialized)
self.assertEqual(serialized_str, reserialized_str)
def test_serialize_ellipsis(self):
_, deserialized, _ = self.roundtrip(Ellipsis)
self.assertEqual(..., deserialized)
def test_tensors_and_shapes(self):
x = ops.random.normal((2, 2), dtype="float64")
obj = {"x": x}
_, new_obj, _ = self.roundtrip(obj)
self.assertAllClose(x, new_obj["x"], atol=1e-5)
obj = {"x.shape": x.shape}
_, new_obj, _ = self.roundtrip(obj)
self.assertEqual(tuple(x.shape), tuple(new_obj["x.shape"]))
def test_custom_fn(self):
obj = {"activation": custom_fn}
serialized, _, reserialized = self.roundtrip(
obj, custom_objects={"custom_fn": custom_fn}
)
self.assertEqual(serialized, reserialized)
# Test inside layer
dense = keras.layers.Dense(1, activation=custom_fn)
dense.build((None, 2))
_, new_dense, _ = self.roundtrip(
dense, custom_objects={"custom_fn": custom_fn}
)
x = ops.random.normal((2, 2))
y1 = dense(x)
_ = new_dense(x)
new_dense.set_weights(dense.get_weights())
y2 = new_dense(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_custom_layer(self):
layer = CustomLayer(factor=2)
x = ops.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer, custom_objects={"CustomLayer": CustomLayer}
)
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
layer = NestedCustomLayer(factor=2)
x = ops.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer,
custom_objects={
"NestedCustomLayer": NestedCustomLayer,
"custom_fn": custom_fn,
},
)
_ = new_layer(x)
new_layer.set_weights(layer.get_weights())
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_lambda_fn(self):
obj = {"activation": lambda x: x**2}
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(obj, safe_mode=True)
_, new_obj, _ = self.roundtrip(obj, safe_mode=False)
self.assertEqual(obj["activation"](3), new_obj["activation"](3))
def test_lambda_layer(self):
lmbda = keras.layers.Lambda(lambda x: x**2)
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(lmbda, safe_mode=True)
_, new_lmbda, _ = self.roundtrip(lmbda, safe_mode=False)
x = ops.random.normal((2, 2))
y1 = lmbda(x)
y2 = new_lmbda(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_safe_mode_scope(self):
lmbda = keras.layers.Lambda(lambda x: x**2)
with serialization_lib.SafeModeScope(safe_mode=True):
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(lmbda)
with serialization_lib.SafeModeScope(safe_mode=False):
_, new_lmbda, _ = self.roundtrip(lmbda)
x = ops.random.normal((2, 2))
y1 = lmbda(x)
y2 = new_lmbda(x)
self.assertAllClose(y1, y2, atol=1e-5)
@pytest.mark.requires_trainable_backend
def test_dict_inputs_outputs(self):
input_foo = keras.Input((2,), name="foo")
input_bar = keras.Input((2,), name="bar")
dense = keras.layers.Dense(1)
output_foo = dense(input_foo)
output_bar = dense(input_bar)
model = keras.Model(
{"foo": input_foo, "bar": input_bar},
{"foo": output_foo, "bar": output_bar},
)
_, new_model, _ = self.roundtrip(model)
original_output = model(
{"foo": np.zeros((2, 2)), "bar": np.zeros((2, 2))}
)
        restored_output = new_model(
{"foo": np.zeros((2, 2)), "bar": np.zeros((2, 2))}
)
self.assertAllClose(original_output["foo"], restored_output["foo"])
self.assertAllClose(original_output["bar"], restored_output["bar"])
@pytest.mark.requires_trainable_backend
def test_shared_inner_layer(self):
with serialization_lib.ObjectSharingScope():
input_1 = keras.Input((2,))
input_2 = keras.Input((2,))
shared_layer = keras.layers.Dense(1)
output_1 = shared_layer(input_1)
wrapper_layer = WrapperLayer(shared_layer)
output_2 = wrapper_layer(input_2)
model = keras.Model([input_1, input_2], [output_1, output_2])
_, new_model, _ = self.roundtrip(
model, custom_objects={"WrapperLayer": WrapperLayer}
)
self.assertIs(model.layers[2], model.layers[3].layer)
self.assertIs(new_model.layers[2], new_model.layers[3].layer)
@pytest.mark.requires_trainable_backend
def test_functional_subclass(self):
class PlainFunctionalSubclass(keras.Model):
pass
inputs = keras.Input((2,), batch_size=3)
outputs = keras.layers.Dense(1)(inputs)
model = PlainFunctionalSubclass(inputs, outputs)
x = ops.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={"PlainFunctionalSubclass": PlainFunctionalSubclass},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, PlainFunctionalSubclass)
class FunctionalSubclassWCustomInit(keras.Model):
def __init__(self, num_units=2):
inputs = keras.Input((2,), batch_size=3)
outputs = keras.layers.Dense(num_units)(inputs)
super().__init__(inputs, outputs)
self.num_units = num_units
def get_config(self):
return {"num_units": self.num_units}
model = FunctionalSubclassWCustomInit(num_units=3)
x = ops.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={
"FunctionalSubclassWCustomInit": FunctionalSubclassWCustomInit
},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, FunctionalSubclassWCustomInit)
def test_shared_object(self):
class MyLayer(keras.layers.Layer):
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
if isinstance(activation, dict):
self.activation = (
serialization_lib.deserialize_keras_object(activation)
)
else:
self.activation = activation
def call(self, x):
return self.activation(x)
def get_config(self):
config = super().get_config()
config["activation"] = self.activation
return config
class SharedActivation:
def __call__(self, x):
return x**2
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls()
shared_act = SharedActivation()
layer_1 = MyLayer(activation=shared_act)
layer_2 = MyLayer(activation=shared_act)
layers = [layer_1, layer_2]
with serialization_lib.ObjectSharingScope():
serialized, new_layers, reserialized = self.roundtrip(
layers,
custom_objects={
"MyLayer": MyLayer,
"SharedActivation": SharedActivation,
},
)
self.assertIn("shared_object_id", serialized[0]["config"]["activation"])
obj_id = serialized[0]["config"]["activation"]
self.assertIn("shared_object_id", serialized[1]["config"]["activation"])
self.assertEqual(obj_id, serialized[1]["config"]["activation"])
self.assertIs(layers[0].activation, layers[1].activation)
self.assertIs(new_layers[0].activation, new_layers[1].activation)
def test_layer_sharing(self):
seq = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(5),
keras.layers.Softmax(),
],
)
func = keras.Model(inputs=seq.inputs, outputs=seq.outputs)
serialized, deserialized, reserialized = self.roundtrip(func)
self.assertLen(deserialized.layers, 3)
def test_keras36_custom_function_reloading(self):
@object_registration.register_keras_serializable(package="serial_test")
def custom_registered_fn(x):
return x**2
config36 = {
"module": "builtins",
"class_name": "function",
"config": "custom_registered_fn",
"registered_name": "function",
}
obj = serialization_lib.deserialize_keras_object(config36)
self.assertIs(obj, custom_registered_fn)
config = {
"module": "builtins",
"class_name": "function",
"config": "serial_test>custom_registered_fn",
"registered_name": "function",
}
obj = serialization_lib.deserialize_keras_object(config)
self.assertIs(obj, custom_registered_fn)
def test_layer_instance_as_activation(self):
"""Tests serialization when activation is a Layer instance."""
# Dense layer with ReLU layer as activation
layer_dense_relu = keras.layers.Dense(
units=4, activation=keras.layers.ReLU(name="my_relu")
)
# Build the layer to ensure weights/state are initialized if needed
layer_dense_relu.build(input_shape=(None, 8))
_, restored_dense_relu, _ = self.roundtrip(layer_dense_relu)
# Verify the activation is correctly deserialized as a ReLU layer
self.assertIsInstance(restored_dense_relu.activation, keras.layers.ReLU)
# Verify properties are preserved
self.assertEqual(restored_dense_relu.activation.name, "my_relu")
def test_layer_instance_with_config_as_activation(self):
"""
Tests serialization when activation is a Layer instance with config.
"""
# Conv1D layer with LeakyReLU layer (with config) as activation
leaky_activation = keras.layers.LeakyReLU(
negative_slope=0.15, name="my_leaky"
)
layer_conv_leaky = keras.layers.Conv1D(
filters=2, kernel_size=3, activation=leaky_activation
)
# Build the layer
layer_conv_leaky.build(input_shape=(None, 10, 4))
_, restored_conv_leaky, _ = self.roundtrip(layer_conv_leaky)
# Verify the activation is correctly deserialized as LeakyReLU
self.assertIsInstance(
restored_conv_leaky.activation, keras.layers.LeakyReLU
)
# Verify configuration of the activation layer is preserved
self.assertEqual(restored_conv_leaky.activation.negative_slope, 0.15)
self.assertEqual(restored_conv_leaky.activation.name, "my_leaky")
def test_layer_string_as_activation(self):
"""Tests serialization when activation is a string."""
layer_dense_relu_string = keras.layers.Dense(units=4, activation="relu")
layer_dense_relu_string.build(input_shape=(None, 8))
_, restored_dense_relu_string, _ = self.roundtrip(
layer_dense_relu_string
)
# Verify the activation is correctly deserialized to the relu function
self.assertTrue(callable(restored_dense_relu_string.activation))
# Check if it resolves to the canonical keras activation function
self.assertEqual(
restored_dense_relu_string.activation, keras.activations.relu
)
@keras.saving.register_keras_serializable()
class MyDense(keras.layers.Layer):
def __init__(
self,
units,
*,
kernel_regularizer=None,
kernel_initializer=None,
**kwargs,
):
super().__init__(**kwargs)
self._units = units
self._kernel_regularizer = kernel_regularizer
self._kernel_initializer = kernel_initializer
def get_config(self):
return dict(
units=self._units,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
**super().get_config(),
)
def build(self, input_shape):
_, input_units = input_shape
self._kernel = self.add_weight(
name="kernel",
shape=[input_units, self._units],
dtype="float32",
regularizer=self._kernel_regularizer,
initializer=self._kernel_initializer,
)
def call(self, inputs):
return ops.matmul(inputs, self._kernel)
@keras.saving.register_keras_serializable()
class MyWrapper(keras.layers.Layer):
def __init__(self, wrapped, **kwargs):
super().__init__(**kwargs)
self._wrapped = wrapped
def get_config(self):
return dict(wrapped=self._wrapped, **super().get_config())
@classmethod
def from_config(cls, config):
config["wrapped"] = keras.saving.deserialize_keras_object(
config["wrapped"]
)
return cls(**config)
def call(self, inputs):
return self._wrapped(inputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/object_registration.py | keras/src/saving/object_registration.py | import inspect
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
GLOBAL_CUSTOM_OBJECTS = {}
GLOBAL_CUSTOM_NAMES = {}
@keras_export(
[
"keras.saving.CustomObjectScope",
"keras.saving.custom_object_scope",
"keras.utils.CustomObjectScope",
"keras.utils.custom_object_scope",
]
)
class CustomObjectScope:
"""Exposes custom classes/functions to Keras deserialization internals.
Under a scope `with custom_object_scope(objects_dict)`, Keras methods such
as `keras.models.load_model()` or
`keras.models.model_from_config()` will be able to deserialize any
custom object referenced by a saved config (e.g. a custom layer or metric).
Example:
Consider a custom regularizer `my_regularizer`:
```python
layer = Dense(3, kernel_regularizer=my_regularizer)
# Config contains a reference to `my_regularizer`
config = layer.get_config()
...
# Later:
with custom_object_scope({'my_regularizer': my_regularizer}):
layer = Dense.from_config(config)
```
Args:
custom_objects: Dictionary of `{str: object}` pairs,
where the `str` key is the object name.
"""
def __init__(self, custom_objects):
self.custom_objects = custom_objects or {}
self.backup = None
def __enter__(self):
self.backup = global_state.get_global_attribute(
"custom_objects_scope_dict", {}
).copy()
global_state.set_global_attribute(
"custom_objects_scope_dict", self.custom_objects.copy()
)
return self
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"custom_objects_scope_dict", self.backup.copy()
)
# Alias.
custom_object_scope = CustomObjectScope
@keras_export(
[
"keras.saving.get_custom_objects",
"keras.utils.get_custom_objects",
]
)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Custom objects set using `custom_object_scope()` are not added to the
global dictionary of custom objects, and will not appear in the returned
dictionary.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary mapping registered class names to classes.
"""
return GLOBAL_CUSTOM_OBJECTS
@keras_export(
[
"keras.saving.register_keras_serializable",
"keras.utils.register_keras_serializable",
]
)
def register_keras_serializable(package="Custom", name=None):
"""Registers an object with the Keras serialization framework.
This decorator injects the decorated class or function into the Keras custom
object dictionary, so that it can be serialized and deserialized without
needing an entry in the user-provided custom object dict. It also injects a
function that Keras will call to get the object's serializable string key.
Note that to be serialized and deserialized, classes must implement the
`get_config()` method. Functions do not have this requirement.
    The object will be registered under the key `'package>name'`, where `name`
    defaults to the object name if not passed.
Example:
```python
# Note that `'my_package'` is used as the `package` argument here, and since
# the `name` argument is not provided, `'MyDense'` is used as the `name`.
@register_keras_serializable('my_package')
class MyDense(keras.layers.Dense):
pass
assert get_registered_object('my_package>MyDense') == MyDense
assert get_registered_name(MyDense) == 'my_package>MyDense'
```
Args:
package: The package that this class belongs to. This is used for the
`key` (which is `"package>name"`) to identify the class. Note that
this is the first argument passed into the decorator.
name: The name to serialize this class under in this package. If not
provided or `None`, the class' name will be used (note that this is
the case when the decorator is used with only one argument, which
becomes the `package`).
Returns:
A decorator that registers the decorated class with the passed names.
"""
def decorator(arg):
"""Registers a class with the Keras serialization framework."""
class_name = name if name is not None else arg.__name__
registered_name = f"{package}>{class_name}"
if inspect.isclass(arg) and not hasattr(arg, "get_config"):
raise ValueError(
"Cannot register a class that does not have a "
"get_config() method."
)
GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
GLOBAL_CUSTOM_NAMES[arg] = registered_name
return arg
return decorator
@keras_export(
[
"keras.saving.get_registered_name",
"keras.utils.get_registered_name",
]
)
def get_registered_name(obj):
"""Returns the name registered to an object within the Keras framework.
This function is part of the Keras serialization and deserialization
framework. It maps objects to the string names associated with those objects
for serialization/deserialization.
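    Example (a minimal sketch; `MyLayer` is a hypothetical custom class and
    `keras` is assumed to be imported):
    ```python
    @register_keras_serializable(package="my_package")
    class MyLayer(keras.layers.Layer):
        pass
    assert get_registered_name(MyLayer) == "my_package>MyLayer"
    ```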
Args:
obj: The object to look up.
Returns:
The name associated with the object, or the default Python name if the
object is not registered.
"""
if obj in GLOBAL_CUSTOM_NAMES:
return GLOBAL_CUSTOM_NAMES[obj]
else:
return obj.__name__
@keras_export(
[
"keras.saving.get_registered_object",
"keras.utils.get_registered_object",
]
)
def get_registered_object(name, custom_objects=None, module_objects=None):
"""Returns the class associated with `name` if it is registered with Keras.
This function is part of the Keras serialization and deserialization
framework. It maps strings to the objects associated with them for
serialization/deserialization.
Example:
```python
def from_config(cls, config, custom_objects=None):
if 'my_custom_object_name' in config:
config['hidden_cls'] = tf.keras.saving.get_registered_object(
config['my_custom_object_name'], custom_objects=custom_objects)
```
Args:
name: The name to look up.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, custom_objects is provided by the user.
module_objects: A dictionary of custom objects to look the name up in.
Generally, module_objects is provided by midlevel library
implementers.
Returns:
An instantiable class associated with `name`, or `None` if no such class
exists.
"""
custom_objects_scope_dict = global_state.get_global_attribute(
"custom_objects_scope_dict", {}
)
if name in custom_objects_scope_dict:
return custom_objects_scope_dict[name]
elif name in GLOBAL_CUSTOM_OBJECTS:
return GLOBAL_CUSTOM_OBJECTS[name]
elif custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/saving_lib_test.py | keras/src/saving/saving_lib_test.py | """Tests for Keras python-based idempotent saving functions."""
import json
import os
import warnings
import zipfile
from io import BytesIO
from pathlib import Path
from unittest import mock
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.saving import saving_lib
@keras.saving.register_keras_serializable(package="my_custom_package")
class MyDense(keras.layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self.nested_layer = keras.layers.Dense(self.units, name="dense")
def build(self, input_shape):
self.additional_weights = [
self.add_weight(
shape=(),
name="my_additional_weight",
initializer="ones",
trainable=True,
),
self.add_weight(
shape=(),
name="my_additional_weight_2",
initializer="ones",
trainable=True,
),
]
self.weights_in_dict = {
"my_weight": self.add_weight(
shape=(),
name="my_dict_weight",
initializer="ones",
trainable=True,
),
}
self.nested_layer.build(input_shape)
def call(self, inputs):
return self.nested_layer(inputs)
def two(self):
return 2
ASSETS_DATA = "These are my assets"
VARIABLES_DATA = np.random.random((10,))
@keras.saving.register_keras_serializable(package="my_custom_package")
class LayerWithCustomSaving(MyDense):
def build(self, input_shape):
self.assets = ASSETS_DATA
self.stored_variables = VARIABLES_DATA
return super().build(input_shape)
def save_assets(self, inner_path):
with open(os.path.join(inner_path, "assets.txt"), "w") as f:
f.write(self.assets)
def save_own_variables(self, store):
store["variables"] = self.stored_variables
def load_assets(self, inner_path):
with open(os.path.join(inner_path, "assets.txt"), "r") as f:
text = f.read()
self.assets = text
def load_own_variables(self, store):
self.stored_variables = np.array(store["variables"])
@keras.saving.register_keras_serializable(package="my_custom_package")
class CustomModelX(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = MyDense(1, name="my_dense_1")
self.dense2 = MyDense(1, name="my_dense_2")
def call(self, inputs):
out = self.dense1(inputs)
return self.dense2(out)
def one(self):
return 1
@keras.saving.register_keras_serializable(package="my_custom_package")
class ModelWithCustomSaving(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.custom_dense = LayerWithCustomSaving(1)
def call(self, inputs):
return self.custom_dense(inputs)
@keras.saving.register_keras_serializable(package="my_custom_package")
class CompileOverridingModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = MyDense(1)
def compile(self, *args, **kwargs):
super().compile(*args, **kwargs)
def call(self, inputs):
return self.dense1(inputs)
@keras.saving.register_keras_serializable(package="my_custom_package")
class CompileOverridingSequential(keras.Sequential):
def compile(self, *args, **kwargs):
super().compile(*args, **kwargs)
@keras.saving.register_keras_serializable(package="my_custom_package")
class SubclassFunctional(keras.Model):
"""Subclassed functional identical to `_get_basic_functional_model`."""
def __init__(self, **kwargs):
inputs = keras.Input(shape=(4,), batch_size=2)
dense = keras.layers.Dense(1, name="first_dense")
x = dense(inputs)
outputs = keras.layers.Dense(1, name="second_dense")(x)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
# Attrs for layers in the functional graph should not affect saving
self.layer_attr = dense
@property
def layer_property(self):
# Properties for layers in the functional graph should not affect saving
return self.layer_attr
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
@keras.saving.register_keras_serializable(package="my_custom_package")
def my_mean_squared_error(y_true, y_pred):
"""Identical to built-in `mean_squared_error`, but as a custom fn."""
return ops.mean(ops.square(y_pred - y_true), axis=-1)
def _get_subclassed_model(compile=True):
subclassed_model = CustomModelX(name="custom_model_x")
if compile:
subclassed_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return subclassed_model
def _get_custom_sequential_model(compile=True):
sequential_model = keras.Sequential(
[MyDense(1), MyDense(1)], name="sequential"
)
if compile:
sequential_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return sequential_model
def _get_basic_sequential_model(compile=True):
sequential_model = keras.Sequential(
[
keras.layers.Dense(1, name="dense_1"),
keras.layers.Dense(1, name="dense_2"),
],
name="sequential",
)
if compile:
sequential_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return sequential_model
def _get_custom_functional_model(compile=True):
inputs = keras.Input(shape=(4,), batch_size=2)
x = MyDense(1, name="first_dense")(inputs)
outputs = MyDense(1, name="second_dense")(x)
functional_model = keras.Model(inputs, outputs)
if compile:
functional_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return functional_model
def _get_basic_functional_model(compile=True):
inputs = keras.Input(shape=(4,), batch_size=2)
x = keras.layers.Dense(1, name="first_dense")(inputs)
outputs = keras.layers.Dense(1, name="second_dense")(x)
functional_model = keras.Model(inputs, outputs)
if compile:
functional_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return functional_model
def _get_subclassed_functional_model(compile=True):
functional_model = SubclassFunctional()
if compile:
functional_model.compile(
optimizer="adam",
loss=my_mean_squared_error,
metrics=[keras.metrics.Hinge(), "mse"],
)
return functional_model
# We need a global function for `Pool.apply_async`
def _load_model_fn(filepath):
saving_lib.load_model(filepath)
class SavingTest(testing.TestCase):
def setUp(self):
        # Set `_MEMORY_UPPER_BOUND` to zero for testing purposes.
self.original_value = saving_lib._MEMORY_UPPER_BOUND
saving_lib._MEMORY_UPPER_BOUND = 0
return super().setUp()
def tearDown(self):
saving_lib._MEMORY_UPPER_BOUND = self.original_value
return super().tearDown()
def _test_inference_after_instantiation(self, model):
x_ref = np.random.random((2, 4))
y_ref = model(x_ref)
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model.save(temp_filepath)
loaded_model = saving_lib.load_model(temp_filepath)
self.assertFalse(model.compiled)
for w_ref, w in zip(model.variables, loaded_model.variables):
self.assertAllClose(w_ref, w)
self.assertAllClose(y_ref, loaded_model(x_ref))
@parameterized.named_parameters(
("subclassed", _get_subclassed_model),
("basic_sequential", _get_basic_sequential_model),
("basic_functional", _get_basic_functional_model),
("custom_sequential", _get_custom_sequential_model),
("custom_functional", _get_custom_functional_model),
("subclassed_functional", _get_subclassed_functional_model),
)
def test_inference_after_instantiation(self, model_fn):
model = model_fn(compile=False)
self._test_inference_after_instantiation(model)
# Test small model path
saving_lib._MEMORY_UPPER_BOUND = 1.0
self._test_inference_after_instantiation(model)
def _test_compile_preserved(self, model):
x_ref = np.random.random((2, 4))
y_ref = np.random.random((2, 1))
model.fit(x_ref, y_ref)
out_ref = model(x_ref)
ref_metrics = model.evaluate(x_ref, y_ref)
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model.save(temp_filepath)
loaded_model = saving_lib.load_model(temp_filepath)
self.assertTrue(model.compiled)
self.assertTrue(loaded_model.built)
for w_ref, w in zip(model.variables, loaded_model.variables):
self.assertAllClose(w_ref, w)
self.assertAllClose(out_ref, loaded_model(x_ref))
self.assertEqual(
model.optimizer.__class__, loaded_model.optimizer.__class__
)
self.assertEqual(
model.optimizer.get_config(), loaded_model.optimizer.get_config()
)
for w_ref, w in zip(
model.optimizer.variables, loaded_model.optimizer.variables
):
self.assertAllClose(w_ref, w)
new_metrics = loaded_model.evaluate(x_ref, y_ref)
for ref_m, m in zip(ref_metrics, new_metrics):
self.assertAllClose(ref_m, m)
@parameterized.named_parameters(
("subclassed", _get_subclassed_model),
("basic_sequential", _get_basic_sequential_model),
("basic_functional", _get_basic_functional_model),
("custom_sequential", _get_custom_sequential_model),
("custom_functional", _get_custom_functional_model),
("subclassed_functional", _get_subclassed_functional_model),
)
@pytest.mark.requires_trainable_backend
def test_compile_preserved(self, model_fn):
model = model_fn(compile=True)
self._test_compile_preserved(model)
# Test small model path
saving_lib._MEMORY_UPPER_BOUND = 1.0
self._test_compile_preserved(model)
def test_saving_preserve_unbuilt_state(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = CustomModelX()
subclassed_model.save(temp_filepath)
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(subclassed_model.compiled, loaded_model.compiled)
self.assertFalse(subclassed_model.built)
self.assertFalse(loaded_model.built)
@pytest.mark.requires_trainable_backend
def test_saved_module_paths_and_class_names(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = _get_subclassed_model()
x = np.random.random((100, 32))
y = np.random.random((100, 1))
subclassed_model.fit(x, y, epochs=1)
subclassed_model.save(temp_filepath)
with zipfile.ZipFile(temp_filepath, "r") as z:
with z.open(saving_lib._CONFIG_FILENAME, "r") as c:
config_json = c.read()
config_dict = json.loads(config_json)
self.assertEqual(
config_dict["registered_name"], "my_custom_package>CustomModelX"
)
self.assertEqual(
config_dict["compile_config"]["optimizer"],
keras.src.saving.serialize_keras_object(
keras.src.optimizers.get("adam")
),
)
self.assertEqual(
config_dict["compile_config"]["loss"]["config"],
"my_custom_package>my_mean_squared_error",
)
@pytest.mark.requires_trainable_backend
def test_saving_custom_assets_and_variables(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = ModelWithCustomSaving()
model.compile(
optimizer="adam",
loss="mse",
)
x = np.random.random((100, 32))
y = np.random.random((100, 1))
model.fit(x, y, epochs=1)
# Assert that the archive has not been saved.
self.assertFalse(os.path.exists(temp_filepath))
model.save(temp_filepath)
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(loaded_model.custom_dense.assets, ASSETS_DATA)
self.assertEqual(
loaded_model.custom_dense.stored_variables.tolist(),
VARIABLES_DATA.tolist(),
)
def _test_compile_overridden_warnings(self, model_type):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = (
CompileOverridingModel()
if model_type == "subclassed"
else CompileOverridingSequential(
[keras.layers.Embedding(4, 1), MyDense(1), MyDense(1)]
)
)
model.compile("sgd", "mse")
model.save(temp_filepath)
with mock.patch.object(warnings, "warn") as mock_warn:
saving_lib.load_model(temp_filepath)
if not mock_warn.call_args_list:
raise AssertionError("Did not warn.")
self.assertIn(
"`compile()` was not called as part of model loading "
"because the model's `compile()` method is custom. ",
mock_warn.call_args_list[0][0][0],
)
def test_compile_overridden_warnings_sequential(self):
self._test_compile_overridden_warnings("sequential")
def test_compile_overridden_warnings_subclassed(self):
self._test_compile_overridden_warnings("subclassed")
def test_metadata(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "my_model.keras")
)
model = CompileOverridingModel()
model.save(temp_filepath)
with zipfile.ZipFile(temp_filepath, "r") as z:
with z.open(saving_lib._METADATA_FILENAME, "r") as c:
metadata_json = c.read()
metadata = json.loads(metadata_json)
self.assertIn("keras_version", metadata)
self.assertIn("date_saved", metadata)
# def test_gfile_copy_local_called(self):
# temp_filepath = Path(
# os.path.join(self.get_temp_dir(), "my_model.keras")
# )
# model = CompileOverridingModel()
# with mock.patch(
# "re.match", autospec=True
# ) as mock_re_match, mock.patch(
# "tensorflow.compat.v2.io.file_utils.copy", autospec=True
# ) as mock_copy:
# # Mock Remote Path check to true to test gfile copy logic
# mock_re_match.return_value = True
# model.save(temp_filepath)
# mock_re_match.assert_called()
# mock_copy.assert_called()
# self.assertIn(str(temp_filepath), mock_re_match.call_args.args)
# self.assertIn(str(temp_filepath), mock_copy.call_args.args)
def test_save_load_weights_only(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
)
model = _get_basic_functional_model()
ref_input = np.random.random((2, 4))
ref_output = model.predict(ref_input)
saving_lib.save_weights_only(model, temp_filepath)
model = _get_basic_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
# Test with Model method
model = _get_basic_functional_model()
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_save_weights_only_with_unbuilt_model(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
)
model = _get_subclassed_model()
with self.assertRaisesRegex(
ValueError, "You are saving a model that has not yet been built."
):
saving_lib.save_weights_only(model, temp_filepath)
def test_load_weights_only_with_unbuilt_model(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
)
model = _get_subclassed_model()
x = np.random.random((100, 32))
_ = model.predict(x) # Build the model by calling it on some data
saving_lib.save_weights_only(model, temp_filepath)
saving_lib.load_weights_only(model, temp_filepath)
new_model = _get_subclassed_model()
with self.assertRaisesRegex(
ValueError,
"You are loading weights into a model that has not yet been built.",
):
saving_lib.load_weights_only(new_model, temp_filepath)
def test_load_weights_only_with_keras_file(self):
# Test loading weights from whole saved model
temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras"))
model = _get_basic_functional_model()
ref_input = np.random.random((2, 4))
ref_output = model.predict(ref_input)
saving_lib.save_model(model, temp_filepath)
model = _get_basic_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
# Test with Model method
model = _get_basic_functional_model()
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_save_weights_subclassed_functional(self):
# The subclassed and basic functional model should have the same
# weights structure.
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
)
model = _get_basic_functional_model()
ref_input = np.random.random((2, 4))
ref_output = model.predict(ref_input)
# Test saving basic, loading subclassed.
saving_lib.save_weights_only(model, temp_filepath)
model = _get_subclassed_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
# Test saving subclassed, loading basic.
saving_lib.save_weights_only(model, temp_filepath)
model = _get_basic_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
@pytest.mark.requires_trainable_backend
def test_compile_arg(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
model = _get_basic_functional_model()
model.compile("sgd", "mse")
model.fit(np.random.random((2, 4)), np.random.random((2, 1)))
saving_lib.save_model(model, temp_filepath)
model = saving_lib.load_model(temp_filepath)
self.assertEqual(model.compiled, True)
model = saving_lib.load_model(temp_filepath, compile=False)
self.assertEqual(model.compiled, False)
# def test_overwrite(self):
# temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
# model = _get_basic_functional_model()
# model.save(temp_filepath)
# model.save(temp_filepath, overwrite=True)
# with self.assertRaises(EOFError):
# model.save(temp_filepath, overwrite=False)
# temp_filepath = os.path.join(
# self.get_temp_dir(), "mymodel.weights.h5"
# )
# model = _get_basic_functional_model()
# model.save_weights(temp_filepath)
# model.save_weights(temp_filepath, overwrite=True)
# with self.assertRaises(EOFError):
# model.save_weights(temp_filepath, overwrite=False)
def test_partial_load(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
original_model = keras.Sequential(
[
keras.Input(shape=(3,), batch_size=2),
keras.layers.Dense(4),
keras.layers.Dense(5),
]
)
original_model.save(temp_filepath)
# Test with a model that has a differently shaped layer
new_model = keras.Sequential(
[
keras.Input(shape=(3,), batch_size=2),
keras.layers.Dense(4),
keras.layers.Dense(6),
]
)
new_layer_kernel_value = np.array(new_model.layers[1].kernel)
with self.assertRaisesRegex(ValueError, "must match"):
# Doesn't work by default
new_model.load_weights(temp_filepath)
# Now it works
new_model.load_weights(temp_filepath, skip_mismatch=True)
ref_weights = original_model.layers[0].get_weights()
new_weights = new_model.layers[0].get_weights()
self.assertEqual(len(ref_weights), len(new_weights))
for ref_w, w in zip(ref_weights, new_weights):
self.assertAllClose(ref_w, w)
self.assertAllClose(
np.array(new_model.layers[1].kernel), new_layer_kernel_value
)
# Test with a model that has a new layer at the end
new_model = keras.Sequential(
[
keras.Input(shape=(3,), batch_size=2),
keras.layers.Dense(4),
keras.layers.Dense(5),
keras.layers.Dense(5),
]
)
new_layer_kernel_value = np.array(new_model.layers[2].kernel)
with self.assertRaisesRegex(ValueError, "received 0 variables"):
# Doesn't work by default
new_model.load_weights(temp_filepath)
# Now it works
new_model.load_weights(temp_filepath, skip_mismatch=True)
for layer_index in [0, 1]:
ref_weights = original_model.layers[layer_index].get_weights()
new_weights = new_model.layers[layer_index].get_weights()
self.assertEqual(len(ref_weights), len(new_weights))
for ref_w, w in zip(ref_weights, new_weights):
self.assertAllClose(ref_w, w)
self.assertAllClose(
np.array(new_model.layers[2].kernel), new_layer_kernel_value
)
@pytest.mark.requires_trainable_backend
def test_save_to_fileobj(self):
model = keras.Sequential(
[keras.layers.Dense(1, input_shape=(1,)), keras.layers.Dense(1)]
)
model.compile(optimizer="adam", loss="mse")
out = BytesIO()
saving_lib.save_model(model, out)
out.seek(0)
model = saving_lib.load_model(out)
model.fit(np.array([1, 2]), np.array([1, 2]))
pred1 = model.predict(np.array([1, 2]))
out = BytesIO()
saving_lib.save_model(model, out)
out.seek(0)
new_model = saving_lib.load_model(out)
pred2 = new_model.predict(np.array([1, 2]))
self.assertAllClose(pred1, pred2, atol=1e-5)
@parameterized.named_parameters(
("high_memory_config", True),
("low_memory_config", False),
)
def test_save_model_exception_raised(self, is_memory_sufficient):
if is_memory_sufficient:
saving_lib._MEMORY_UPPER_BOUND = 0.5 # 50%
# Assume we have an error in `save_own_variables`.
class RaiseErrorLayer(keras.layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(units)
def call(self, inputs):
return self.dense(inputs)
def save_own_variables(self, store):
raise ValueError
model = keras.Sequential([keras.Input([1]), RaiseErrorLayer(1)])
filepath = f"{self.get_temp_dir()}/model.keras"
with self.assertRaises(ValueError):
saving_lib.save_model(model, filepath)
# Ensure we don't have a bad "model.weights.h5" inside the zip file.
self.assertTrue(Path(filepath).exists())
with zipfile.ZipFile(filepath) as zf:
all_filenames = zf.namelist()
self.assertNotIn("model.weights.h5", all_filenames)
# Ensure we don't have any temporary files left.
self.assertLen(os.listdir(Path(filepath).parent), 1)
self.assertIn("model.keras", os.listdir(Path(filepath).parent))
@parameterized.named_parameters(
("high_memory_config", True),
("low_memory_config", False),
)
def test_load_model_exception_raised(self, is_memory_sufficient):
if is_memory_sufficient:
saving_lib._MEMORY_UPPER_BOUND = 0.5 # 50%
# Assume we have an error in `load_own_variables`.
class RaiseErrorLayer(keras.layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(units)
def call(self, inputs):
return self.dense(inputs)
def load_own_variables(self, store):
raise ValueError
model = keras.Sequential([keras.Input([1]), RaiseErrorLayer(1)])
filepath = f"{self.get_temp_dir()}/model.keras"
saving_lib.save_model(model, filepath)
with self.assertRaises(ValueError):
saving_lib.load_model(
filepath, custom_objects={"RaiseErrorLayer": RaiseErrorLayer}
)
# Ensure we don't have any temporary files left.
self.assertLen(os.listdir(Path(filepath).parent), 1)
self.assertIn("model.keras", os.listdir(Path(filepath).parent))
def test_load_model_read_only_system(self):
model = keras.Sequential([keras.Input([1]), keras.layers.Dense(32)])
filepath = f"{self.get_temp_dir()}/model.keras"
saving_lib.save_model(model, filepath)
# Load the model correctly, regardless of whether an OSError occurs.
original_mode = os.stat(Path(filepath).parent).st_mode
os.chmod(Path(filepath).parent, mode=0o555)
model = saving_lib.load_model(filepath)
os.chmod(Path(filepath).parent, mode=original_mode)
# Ensure we don't have any temporary files left.
self.assertLen(os.listdir(Path(filepath).parent), 1)
self.assertIn("model.keras", os.listdir(Path(filepath).parent))
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX backend doesn't support Python's multiprocessing",
)
@pytest.mark.skipif(
testing.tensorflow_uses_gpu() or testing.torch_uses_gpu(),
reason="This test doesn't support GPU",
)
def test_load_model_concurrently(self):
import multiprocessing as mp
model = keras.Sequential([keras.Input([1]), keras.layers.Dense(2)])
filepath = f"{self.get_temp_dir()}/model.keras"
saving_lib.save_model(model, filepath)
# Load the model concurrently.
results = []
with mp.Pool(4) as pool:
for i in range(4):
results.append(pool.apply_async(_load_model_fn, (filepath,)))
pool.close()
pool.join()
[r.get() for r in results] # No error occurs here
def test_load_model_containing_reused_layer(self):
# https://github.com/keras-team/keras/issues/20307
inputs = keras.Input((4,))
reused_layer = keras.layers.Dense(4)
x = reused_layer(inputs)
x = keras.layers.Dense(4)(x)
outputs = reused_layer(x)
model = keras.Model(inputs, outputs)
self.assertLen(model.layers, 3) # Input + 2 Dense layers
self._test_inference_after_instantiation(model)
@parameterized.named_parameters(
("efficientnet_b0_512", "efficientnet_b0", 1), # Only 1 sharded file.
("efficientnet_b0_10", "efficientnet_b0", 0.01),
)
def test_weights_sharding(self, model_name, max_shard_size):
from keras.src.applications import efficientnet
if backend.image_data_format() == "channels_last":
shape = (224, 224, 3)
else:
shape = (3, 224, 224)
if model_name == "efficientnet_b0":
model_fn = efficientnet.EfficientNetB0
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.json")
)
model = model_fn(weights=None, input_shape=shape)
ref_input = np.random.random((1, *shape)).astype("float32")
ref_output = model.predict(ref_input)
# Save the sharded files.
saving_lib.save_weights_only(
model, temp_filepath, max_shard_size=max_shard_size
)
self.assertIn("mymodel.weights.json", os.listdir(temp_filepath.parent))
if max_shard_size == 1:
# 1 sharded file + 1 config file = 2.
self.assertLen(os.listdir(temp_filepath.parent), 2)
elif max_shard_size == 0.01:
            # 3 sharded files + 1 config file = 4.
self.assertLen(os.listdir(temp_filepath.parent), 4)
with open(temp_filepath, "r") as f:
sharding_config = json.load(f)
self.assertIn("metadata", sharding_config)
self.assertIn("weight_map", sharding_config)
# Instantiate new model and load the sharded files.
model = model_fn(weights=None, input_shape=shape)
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
class SavingAPITest(testing.TestCase):
def test_saving_api_errors(self):
from keras.src.saving import saving_api
model = _get_basic_functional_model()
# Saving API errors
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel")
with self.assertRaisesRegex(ValueError, "argument is deprecated"):
saving_api.save_model(model, temp_filepath, save_format="keras")
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.notkeras")
with self.assertRaisesRegex(ValueError, "Invalid filepath extension"):
saving_api.save_model(model, temp_filepath)
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
with self.assertRaisesRegex(ValueError, "are not supported"):
saving_api.save_model(model, temp_filepath, invalid_arg="hello")
# Loading API errors
temp_filepath = os.path.join(self.get_temp_dir(), "non_existent.keras")
with self.assertRaisesRegex(
ValueError, "Please ensure the file is an accessible"
):
_ = saving_api.load_model(temp_filepath)
temp_filepath = os.path.join(self.get_temp_dir(), "my_saved_model")
with self.assertRaisesRegex(ValueError, "File format not supported"):
_ = saving_api.load_model(temp_filepath)
def test_model_api_endpoint(self):
temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras"))
model = _get_basic_functional_model()
ref_input = np.random.random((2, 4))
ref_output = model.predict(ref_input)
model.save(temp_filepath)
model = keras.saving.load_model(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_model_api_endpoint_h5(self):
temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.h5"))
model = _get_basic_functional_model()
ref_input = np.random.random((2, 4))
ref_output = model.predict(ref_input)
model.save(temp_filepath)
model = keras.saving.load_model(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_model_api_errors(self):
model = _get_basic_functional_model()
# Saving API errors
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel")
with self.assertRaisesRegex(ValueError, "argument is deprecated"):
model.save(temp_filepath, save_format="keras")
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.notkeras")
with self.assertRaisesRegex(ValueError, "Invalid filepath extension"):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/keras_saveable.py | keras/src/saving/keras_saveable.py | import io
class KerasSaveable:
# Note: renaming this function will cause old pickles to be broken.
# This is probably not a huge deal, as pickle should not be a recommended
# saving format -- it should only be supported for use with distributed
# computing frameworks.
def _obj_type(self):
raise NotImplementedError(
"KerasSaveable subclasses must provide an "
"implementation for `_obj_type()`"
)
@classmethod
def _unpickle_model(cls, bytesio):
import keras.src.saving.saving_lib as saving_lib
# pickle is not safe regardless of what you do.
return saving_lib._load_model_from_fileobj(
bytesio, custom_objects=None, compile=True, safe_mode=False
)
def __reduce__(self):
"""__reduce__ is used to customize the behavior of `pickle.dumps()`.
The method returns a tuple of two elements: a function, and a tuple of
arguments to pass to that function. In this case we just leverage the
keras saving library."""
import keras.src.saving.saving_lib as saving_lib
buf = io.BytesIO()
saving_lib._save_model_to_fileobj(self, buf, "h5")
return (
self._unpickle_model,
(buf,),
)
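# A minimal usage sketch (illustrative, not part of the library): because
# `KerasSaveable.__reduce__` routes pickling through the Keras saving
# library, models round-trip through `pickle` directly.
if __name__ == "__main__":
    import pickle

    import numpy as np

    import keras

    model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
    restored = pickle.loads(pickle.dumps(model))  # uses __reduce__ above
    x = np.ones((1, 4))
    assert np.allclose(model.predict(x), restored.predict(x))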
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/__init__.py | keras/src/saving/__init__.py | from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import custom_object_scope
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
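# A minimal usage sketch of the re-exported registration API (illustrative
# only; mirrors the object_registration tests that follow):
if __name__ == "__main__":
    import keras

    @keras.saving.register_keras_serializable(package="demo")
    def double(x):
        return x * 2

    config = keras.saving.serialize_keras_object(double)
    fn = keras.saving.deserialize_keras_object(config)
    assert fn(3) == 6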
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/saving_api.py | keras/src/saving/saving_api.py | import os
import zipfile
from absl import logging
from keras.src.api_export import keras_export
from keras.src.legacy.saving import legacy_h5_format
from keras.src.saving import saving_lib
from keras.src.utils import file_utils
from keras.src.utils import io_utils
try:
import h5py
except ImportError:
h5py = None
@keras_export(["keras.saving.save_model", "keras.models.save_model"])
def save_model(model, filepath, overwrite=True, zipped=None, **kwargs):
"""Saves a model as a `.keras` file.
Args:
model: Keras model instance to be saved.
filepath: `str` or `pathlib.Path` object. Path where to save the model.
overwrite: Whether we should overwrite any existing model at the target
location, or instead ask the user via an interactive prompt.
zipped: Whether to save the model as a zipped `.keras`
archive (default when saving locally), or as an unzipped directory
(default when saving on the Hugging Face Hub).
Example:
```python
model = keras.Sequential(
[
keras.layers.Dense(5, input_shape=(3,)),
keras.layers.Softmax(),
],
)
model.save("model.keras")
loaded_model = keras.saving.load_model("model.keras")
x = keras.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
Note that `model.save()` is an alias for `keras.saving.save_model()`.
The saved `.keras` file is a `zip` archive that contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state.
"""
include_optimizer = kwargs.pop("include_optimizer", True)
save_format = kwargs.pop("save_format", False)
if save_format:
if str(filepath).endswith((".h5", ".hdf5")) or str(filepath).endswith(
".keras"
):
logging.warning(
"The `save_format` argument is deprecated in Keras 3. "
"We recommend removing this argument as it can be inferred "
"from the file path. "
f"Received: save_format={save_format}"
)
else:
raise ValueError(
"The `save_format` argument is deprecated in Keras 3. "
"Please remove this argument and pass a file path with "
"either `.keras` or `.h5` extension. "
f"Received: save_format={save_format}"
)
if kwargs:
raise ValueError(
"The following argument(s) are not supported: "
f"{list(kwargs.keys())}"
)
# Deprecation warnings
if str(filepath).endswith((".h5", ".hdf5")):
logging.warning(
"You are saving your model as an HDF5 file via "
"`model.save()` or `keras.saving.save_model(model)`. "
"This file format is considered legacy. "
"We recommend using instead the native Keras format, "
"e.g. `model.save('my_model.keras')` or "
"`keras.saving.save_model(model, 'my_model.keras')`. "
)
is_hf = str(filepath).startswith("hf://")
if zipped is None:
zipped = not is_hf # default behavior depends on destination
# If file exists and should not be overwritten.
try:
exists = (not is_hf) and os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
if zipped and str(filepath).endswith(".keras"):
return saving_lib.save_model(model, filepath)
if not zipped:
return saving_lib.save_model(model, filepath, zipped=False)
if str(filepath).endswith((".h5", ".hdf5")):
return legacy_h5_format.save_model_to_hdf5(
model, filepath, overwrite, include_optimizer
)
raise ValueError(
"Invalid filepath extension for saving. "
"Please add either a `.keras` extension for the native Keras "
f"format (recommended) or a `.h5` extension. "
"Use `model.export(filepath)` if you want to export a SavedModel "
"for use with TFLite/TFServing/etc. "
f"Received: filepath={filepath}."
)
@keras_export(["keras.saving.load_model", "keras.models.load_model"])
def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
"""Loads a model saved via `model.save()`.
Args:
filepath: `str` or `pathlib.Path` object, path to the saved model file.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
compile: Boolean, whether to compile the model after loading.
safe_mode: Boolean, whether to disallow unsafe `lambda` deserialization.
When `safe_mode=False`, loading an object has the potential to
trigger arbitrary code execution. This argument is only
applicable to the Keras v3 model format. Defaults to `True`.
Returns:
A Keras model instance. If the original model was compiled,
and the argument `compile=True` is set, then the returned model
will be compiled. Otherwise, the model will be left uncompiled.
Example:
```python
model = keras.Sequential([
keras.layers.Dense(5, input_shape=(3,)),
keras.layers.Softmax()])
model.save("model.keras")
loaded_model = keras.saving.load_model("model.keras")
x = np.random.random((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
Note that the model variables may have different name values
(`var.name` property, e.g. `"dense_1/kernel:0"`) after being reloaded.
It is recommended that you use layer attributes to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
"""
is_keras_zip = str(filepath).endswith(".keras") and zipfile.is_zipfile(
filepath
)
is_keras_dir = file_utils.isdir(filepath) and file_utils.exists(
file_utils.join(filepath, "config.json")
)
is_hf = str(filepath).startswith("hf://")
# Support for remote zip files
if (
file_utils.is_remote_path(filepath)
and not file_utils.isdir(filepath)
and not is_keras_zip
and not is_hf
):
local_path = file_utils.join(
saving_lib.get_temp_dir(), os.path.basename(filepath)
)
# Copy from remote to temporary local directory
file_utils.copy(filepath, local_path)
# Switch filepath to local zipfile for loading model
if zipfile.is_zipfile(local_path):
filepath = local_path
is_keras_zip = True
if is_keras_zip or is_keras_dir or is_hf:
return saving_lib.load_model(
filepath,
custom_objects=custom_objects,
compile=compile,
safe_mode=safe_mode,
)
if str(filepath).endswith((".h5", ".hdf5")):
return legacy_h5_format.load_model_from_hdf5(
filepath,
custom_objects=custom_objects,
compile=compile,
safe_mode=safe_mode,
)
elif str(filepath).endswith(".keras"):
raise ValueError(
f"File not found: filepath={filepath}. "
"Please ensure the file is an accessible `.keras` "
"zip file."
)
else:
raise ValueError(
f"File format not supported: filepath={filepath}. "
"Keras 3 only supports V3 `.keras` files and "
"legacy H5 format files (`.h5` extension). "
"Note that the legacy SavedModel format is not "
"supported by `load_model()` in Keras 3. In "
"order to reload a TensorFlow SavedModel as an "
"inference-only layer in Keras 3, use "
"`keras.layers.TFSMLayer("
f"{filepath}, call_endpoint='serving_default')` "
"(note that your `call_endpoint` "
"might have a different name)."
)
@keras_export("keras.saving.save_weights")
def save_weights(
model, filepath, overwrite=True, max_shard_size=None, **kwargs
):
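"""Saves the weights of `model` to a `.weights.h5` file.
When `max_shard_size` is specified, the weights are sharded across
multiple files and `filepath` should instead end in `.weights.json`,
naming the sharding config (with `metadata` and `weight_map` entries).
Note: `max_shard_size` is assumed here to be expressed in GB.
"""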
filepath_str = str(filepath)
if max_shard_size is None and not filepath_str.endswith(".weights.h5"):
raise ValueError(
"The filename must end in `.weights.h5`. "
f"Received: filepath={filepath_str}"
)
elif max_shard_size is not None and not filepath_str.endswith(
("weights.h5", "weights.json")
):
raise ValueError(
"The filename must end in `.weights.json` when `max_shard_size` is "
f"specified. Received: filepath={filepath_str}"
)
try:
exists = os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath_str)
if not proceed:
return
saving_lib.save_weights_only(model, filepath, max_shard_size, **kwargs)
@keras_export("keras.saving.load_weights")
def load_weights(model, filepath, skip_mismatch=False, **kwargs):
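"""Loads weights into `model` from a saved weights file.
Supports, as dispatched below: `.keras` archives, `.weights.h5` and
sharded `.weights.json` files, and legacy `.h5`/`.hdf5` files. When
`skip_mismatch` is `True`, weights whose shape does not match the
target model are skipped instead of raising an error.
"""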
filepath_str = str(filepath)
# Get the legacy kwargs.
objects_to_skip = kwargs.pop("objects_to_skip", None)
by_name = kwargs.pop("by_name", None)
if kwargs:
raise ValueError(f"Invalid keyword arguments: {kwargs}")
if filepath_str.endswith(".keras"):
if objects_to_skip is not None:
raise ValueError(
"`objects_to_skip` only supports loading '.weights.h5' files. "
f"Received: {filepath}"
)
if by_name is not None:
raise ValueError(
"`by_name` only supports loading legacy '.h5' or '.hdf5' "
f"files. Received: {filepath}"
)
saving_lib.load_weights_only(
model, filepath, skip_mismatch=skip_mismatch
)
elif filepath_str.endswith(".weights.h5") or filepath_str.endswith(
".weights.json"
):
if by_name is not None:
raise ValueError(
"`by_name` only supports loading legacy '.h5' or '.hdf5' "
f"files. Received: {filepath}"
)
saving_lib.load_weights_only(
model,
filepath,
skip_mismatch=skip_mismatch,
objects_to_skip=objects_to_skip,
)
elif filepath_str.endswith(".h5") or filepath_str.endswith(".hdf5"):
if not h5py:
raise ImportError(
"Loading a H5 file requires `h5py` to be installed."
)
if objects_to_skip is not None:
raise ValueError(
"`objects_to_skip` only supports loading '.weights.h5' files. "
f"Received: {filepath}"
)
with h5py.File(filepath, "r") as f:
if "layer_names" not in f.attrs and "model_weights" in f:
f = f["model_weights"]
if by_name:
legacy_h5_format.load_weights_from_hdf5_group_by_name(
f, model, skip_mismatch
)
else:
legacy_h5_format.load_weights_from_hdf5_group(
f, model, skip_mismatch
)
else:
raise ValueError(
f"File format not supported: filepath={filepath}. "
"Keras 3 only supports V3 `.keras` and `.weights.h5` "
"files, or legacy V1/V2 `.h5` files."
)
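# A minimal round-trip sketch (illustrative, not part of the module; the
# weights file name is hypothetical):
if __name__ == "__main__":
    import numpy as np

    import keras

    model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
    model.save_weights("demo.weights.h5")  # routed through `save_weights`
    clone = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
    clone.load_weights("demo.weights.h5")  # routed through `load_weights`
    x = np.ones((1, 4))
    assert np.allclose(model.predict(x), clone.predict(x))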
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/object_registration_test.py | keras/src/saving/object_registration_test.py | import keras
from keras.src import testing
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
class TestObjectRegistration(testing.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass:
pass
def check_get_in_thread():
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
checked_thread = self.checkedThread(check_get_in_thread)
checked_thread.start()
checked_thread.join()
def test_serialize_custom_class_with_default_name(self):
@object_registration.register_keras_serializable()
class TestClass:
def __init__(self, value):
self._value = value
def get_config(self):
return {"value": self._value}
@classmethod
def from_config(cls, config):
return cls(**config)
serialized_name = "Custom>TestClass"
inst = TestClass(value=10)
class_name = object_registration.GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = serialization_lib.serialize_keras_object(inst)
self.assertEqual("TestClass", config["class_name"])
new_inst = serialization_lib.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
def test_serialize_custom_class_with_custom_name(self):
@object_registration.register_keras_serializable(
"TestPackage", "CustomName"
)
class OtherTestClass:
def __init__(self, val):
self._val = val
def get_config(self):
return {"val": self._val}
@classmethod
def from_config(cls, config):
return cls(**config)
serialized_name = "TestPackage>CustomName"
inst = OtherTestClass(val=5)
class_name = object_registration.GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(OtherTestClass)
self.assertEqual(fn_class_name, class_name)
cls = object_registration.get_registered_object(fn_class_name)
self.assertEqual(OtherTestClass, cls)
config = keras.saving.serialize_keras_object(inst)
self.assertEqual("OtherTestClass", config["class_name"])
new_inst = keras.saving.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@object_registration.register_keras_serializable()
def my_fn():
return 42
serialized_name = "Custom>my_fn"
class_name = object_registration.GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(my_fn)
self.assertEqual(fn_class_name, class_name)
config = keras.saving.serialize_keras_object(my_fn)
fn = keras.saving.deserialize_keras_object(config)
self.assertEqual(42, fn())
fn_2 = object_registration.get_registered_object(fn_class_name)
self.assertEqual(42, fn_2())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError,
"Cannot register a class that does not have a get_config.*",
):
@object_registration.register_keras_serializable(
"TestPackage", "TestClass"
)
class TestClass:
def __init__(self, value):
self._value = value
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/saving/file_editor_test.py | keras/src/saving/file_editor_test.py | import os
import numpy as np
import pytest
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.save(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
@pytest.mark.requires_trainable_backend
def test_scalar_weight(self):
model = keras.Sequential(name="my_sequential")
model.add(keras.Input(shape=(1,), name="my_input"))
model.add(keras.layers.Dense(1, activation="sigmoid", name="my_dense"))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.array([[1]]), np.array([[1]]), verbose=0)
model_fpath = os.path.join(self.get_temp_dir(), "model.keras")
weights_fpath = os.path.join(self.get_temp_dir(), "model.weights.h5")
model.save(model_fpath)
model.save_weights(weights_fpath)
model_editor = KerasFileEditor(model_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_editor.weights_dict)), 8
)
model_weights_editor = KerasFileEditor(weights_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_weights_editor.weights_dict)), 8
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/optimizer_test.py | keras/src/optimizers/optimizer_test.py | import os
import pickle
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import constraints
from keras.src import layers
from keras.src import models
from keras.src import optimizers
from keras.src import testing
class OptimizerTest(testing.TestCase):
def test_iterations_counter(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.Adam(learning_rate=1.0)
self.assertAllClose(optimizer.iterations, 0)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(optimizer.iterations, 1)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(optimizer.iterations, 2)
def test_empty_gradients(self):
# Test no valid gradient
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = None
optimizer = optimizers.SGD(learning_rate=1.0)
with self.assertRaisesRegex(
ValueError, "No gradients provided for any variable."
):
optimizer.apply_gradients([(grads, v)])
# Test filtering of empty gradients
v2 = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads2 = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=1.0)
with self.assertWarns(Warning):
optimizer.apply_gradients([(grads, v), (grads2, v2)])
self.assertAllClose(v, [[3.0, 4.0], [5.0, 6.0]])
self.assertAllClose(v2, [[2.0, 3.0], [4.0, 5.0]])
def test_clip_args(self):
optimizer = optimizers.SGD(learning_rate=1.0, clipnorm=0.1)
self.assertEqual(optimizer.clipnorm, 0.1)
optimizer = optimizers.SGD(learning_rate=1.0, clipvalue=0.1)
self.assertEqual(optimizer.clipvalue, 0.1)
optimizer = optimizers.SGD(learning_rate=1.0, global_clipnorm=0.1)
self.assertEqual(optimizer.global_clipnorm, 0.1)
# Test invalid arguments
with self.assertRaisesRegex(
ValueError,
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can",
):
optimizers.SGD(
learning_rate=1.0,
clipnorm=0.1,
clipvalue=0.1,
)
with self.assertRaisesRegex(
ValueError,
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can",
):
optimizers.SGD(
learning_rate=1.0,
clipnorm=0.1,
global_clipnorm=0.1,
)
def test_clip_norm(self):
optimizer = optimizers.SGD(clipnorm=1)
grad = backend.convert_to_tensor([100.0, 100.0])
clipped_grad = optimizer._clip_gradients([grad])
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = optimizers.SGD(clipvalue=1)
grad = backend.convert_to_tensor([100.0, 100.0])
clipped_grad = optimizer._clip_gradients([grad])
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_global_clip_norm(self):
optimizer = optimizers.SGD(global_clipnorm=1)
grad = np.array([50.0, 100.0], dtype="float32")
global_norm = np.linalg.norm(grad)
clipped_grad = optimizer._clip_gradients(
[backend.convert_to_tensor(grad)]
)
self.assertAllClose(clipped_grad[0], grad / global_norm)
def test_ema(self):
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(
learning_rate=1.0,
use_ema=True,
ema_momentum=0.9,
ema_overwrite_frequency=3,
)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[2.0, 3.0], [4.0, 5.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[2.0, 3.0], [4.0, 5.0]], # initialized after first step
)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.9, 2.9], [3.9, 4.9]],
)
optimizer.apply_gradients([(grads, v)])
# Variables were overwritten with EMA
self.assertAllClose(v, [[1.71, 2.71], [3.71, 4.71]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.71, 2.71], [3.71, 4.71]],
)
@pytest.mark.requires_trainable_backend
def test_ema_with_model_fit(self):
x_train = np.ones((1, 1)).astype("float32")
y_train = np.zeros((1, 1)).astype("float32")
optimizer = optimizers.SGD(
learning_rate=0.1, use_ema=True, ema_momentum=0.9
)
model = models.Sequential(
[layers.Dense(2, kernel_initializer="ones", use_bias=False)]
)
model.compile(loss="mse", optimizer=optimizer, run_eagerly=True)
model.fit(x_train, y_train, batch_size=1, epochs=2)
self.assertAllClose(
optimizer._model_variables_moving_average[0].numpy(),
[[0.891, 0.891]],
atol=1e-5,
)
self.assertAllClose(
model.trainable_variables[0].numpy(),
[[0.891, 0.891]],
atol=1e-5,
)
def test_constraints_are_applied(self):
v = backend.Variable(np.random.random((2, 2)) - 1.0)
v.constraint = constraints.NonNeg()
optimizer = optimizers.SGD(learning_rate=0.0001)
grad = backend.numpy.zeros((2, 2))
optimizer.apply_gradients([(grad, v)])
self.assertAlmostEqual(np.min(v), 0.0)
def test_get_method(self):
obj = optimizers.get("sgd")
self.assertIsInstance(obj, optimizers.SGD)
obj = optimizers.get("adamw")
self.assertIsInstance(obj, optimizers.AdamW)
obj = optimizers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
optimizers.get("typo")
def test_static_loss_scaling(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]]) * 1024.0
optimizer = optimizers.SGD(learning_rate=1.0, loss_scale_factor=1024.0)
optimizer.apply_gradients([(grads, v)])
self.assertEqual(optimizer.scale_loss(1.0), 1024.0)
self.assertAllClose(v, [[0.0, 0.0], [0.0, 0.0]])
def test_set_weights(self):
x = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer_1 = optimizers.Adam()
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
optimizer_1.apply_gradients(zip([grads], [x]))
optimizer_2 = optimizers.Adam()
with self.assertRaisesRegex(ValueError, "You are calling*"):
optimizer_2.set_weights(optimizer_1.variables)
optimizer_2.build([x])
optimizer_2.set_weights(optimizer_1.variables)
for i in range(len(optimizer_1.variables)):
self.assertAllClose(
optimizer_1.variables[i],
optimizer_2.variables[i],
)
def test_gradient_accumulation(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(
learning_rate=1.0, gradient_accumulation_steps=3
)
self.assertEqual(optimizer.gradient_accumulation_steps, 3)
# Iteration 1
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(optimizer._iterations, 1)
self.assertAllClose(optimizer.iterations, 0)
# Iteration 2
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[2.0, 2.0], [2.0, 2.0]]
)
self.assertAllClose(optimizer._iterations, 2)
self.assertAllClose(optimizer.iterations, 0)
# Iteration 3
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
)
self.assertAllClose(optimizer._iterations, 3)
self.assertAllClose(optimizer.iterations, 1)
# Iteration 4
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(optimizer._iterations, 4)
self.assertAllClose(optimizer.iterations, 1)
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="Requires TF")
def test_tf_checkpointing(self):
import tensorflow as tf
model = models.Sequential([layers.Dense(2)])
optimizer = optimizers.Adam()
x, y = np.random.random((1, 2)), np.random.random((1, 2))
model.compile(optimizer, "mse")
model.train_on_batch(x, y)
ref_pred = model.predict(x)
# Both model and optimizer are Trackables
checkpoint = tf.train.Checkpoint(model, optimizer=optimizer)
temp_filepath = os.path.join(self.get_temp_dir(), "tf_ckpt")
save_path = checkpoint.save(temp_filepath)
# Keep training the model (predictions now differ)
model.train_on_batch(x, y)
pred = model.predict(x)
self.assertNotAllClose(pred, ref_pred, atol=1e-3)
# Restore the model and check prediction correctness
checkpoint.restore(save_path)
pred = model.predict(x)
self.assertAllClose(pred, ref_pred, atol=1e-5)
def test_callable_learning_rate(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=lambda: 0.1)
self.assertAllClose(optimizer.iterations, 0)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.9, 1.9], [2.9, 3.9]])
self.assertAllClose(optimizer.iterations, 1)
def test_overwrite_with_gradient(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
v.overwrite_with_gradient = True
v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
grads2 = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=1.0)
optimizer.apply_gradients([(grads, v), (grads2, v2)])
# `v` is overwritten by its gradient but `v2` is updated normally
self.assertAllClose(v, [[1.0, 1.0], [1.0, 1.0]])
self.assertAllClose(v2, [[0.0, 1.0], [2.0, 3.0]])
def test_overwrite_with_gradient_with_gradient_accumulation(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
v.overwrite_with_gradient = True
v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grad_ones = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
grad_twos = backend.convert_to_tensor([[2.0, 2.0], [2.0, 2.0]])
optimizer = optimizers.SGD(
learning_rate=1.0, gradient_accumulation_steps=2
)
# Iteration 1
optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)])
self.assertAllClose(optimizer._iterations, 1)
self.assertAllClose(optimizer.iterations, 0)
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(v2, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]]
)
# Iteration 2
optimizer.apply_gradients([(grad_twos, v), (grad_twos, v2)])
self.assertAllClose(optimizer._iterations, 2)
self.assertAllClose(optimizer.iterations, 1)
self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]])
self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[0.0, 0.0], [0.0, 0.0]]
)
# Iteration 3
optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)])
self.assertAllClose(optimizer._iterations, 3)
self.assertAllClose(optimizer.iterations, 1)
self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]])
self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]]
)
@parameterized.parameters(
[
("adam",),
("sgd",),
("adamw",),
("adagrad",),
("rmsprop",),
("adadelta",),
("adamax",),
("lion",),
("nadam",),
("ftrl",),
("adafactor",),
]
)
def test_gradient_accumulation_with_weight_decay(self, optimizer):
optimizer1 = optimizers.get(
{"class_name": optimizer, "config": {"weight_decay": 0.05}}
)
optimizer3 = optimizers.get(
{
"class_name": optimizer,
"config": {
"weight_decay": 0.05,
"gradient_accumulation_steps": 3,
},
}
)
variable1 = backend.Variable([[0.9], [0.5]])
variable3 = backend.Variable([[0.9], [0.5]])
for epoch in range(8):
grads3 = np.random.random([3, 2, 1]).astype("float32")
grads1 = backend.convert_to_tensor(grads3.mean(axis=0))
optimizer1.apply_gradients([(grads1, variable1)])
for batch in range(3):
grads3_ = backend.convert_to_tensor(grads3[batch])
optimizer3.apply_gradients([(grads3_, variable3)])
self.assertAllClose(variable1, variable3)
def test_setting_lr_to_callable_untracks_lr_var(self):
adam = optimizers.Adam(learning_rate=0.001)
self.assertLen(adam.variables, 2)
adam.learning_rate = optimizers.schedules.PolynomialDecay(
adam.learning_rate, 4
)
self.assertLen(adam.variables, 1)
@parameterized.parameters(
[
("adam",),
("sgd",),
("adamw",),
("adagrad",),
("rmsprop",),
("adadelta",),
("adamax",),
("lion",),
("nadam",),
("ftrl",),
("adafactor",),
]
)
def test_pickleable_optimizers(self, optimizer):
optimizer = optimizers.get(optimizer)
reloaded = pickle.loads(pickle.dumps(optimizer))
self.assertEqual(optimizer.get_config(), reloaded.get_config())
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The tf.Variable test can only run with TensorFlow backend.",
)
def test_mixed_with_tf_variables(self):
import tensorflow as tf
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
tf_v = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
tf_grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.Adam(learning_rate=1.0)
optimizer.apply_gradients([(grads, v), (tf_grads, tf_v)])
self.assertAllClose(optimizer.iterations, 1)
# Test with no grads
with self.assertWarnsRegex(
UserWarning, "Gradients do not exist for variables"
):
optimizer.apply_gradients([(grads, v), (None, tf_v)])
self.assertAllClose(optimizer.iterations, 2)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adamw_test.py | keras/src/optimizers/adamw_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adamw import AdamW
class AdamWTest(testing.TestCase):
def test_config(self):
optimizer = AdamW(
learning_rate=0.5,
weight_decay=0.008,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = AdamW(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4980, 1.4960, 2.494, 3.492], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_weight_decay_is_none(self):
with self.assertRaisesRegex(
ValueError,
"Argument `weight_decay` must be a float. "
"Received: weight_decay=None",
):
AdamW(learning_rate=1.0, weight_decay=None)
def test_correctness_with_golden(self):
optimizer = AdamW(learning_rate=1.0, weight_decay=0.5, epsilon=2)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998],
[0.2486, 0.2475, 0.2463, 0.2451, 0.244, 0.2428, 0.2417, 0.2405, 0.2394, 0.2382],
[0.1223, 0.1198, 0.1174, 0.1149, 0.1124, 0.11, 0.1075, 0.1051, 0.1027, 0.1003],
[0.0586, 0.0549, 0.0512, 0.0475, 0.0439, 0.0402, 0.0366, 0.033, 0.0294, 0.0258],
[0.0263, 0.0215, 0.0167, 0.012, 0.0073, 0.0026, -0.0021, -0.0067, -0.0113, -0.0159]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = AdamW(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = AdamW(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/ftrl_test.py | keras/src/optimizers/ftrl_test.py | # flake8: noqa
import numpy as np
from unittest import mock
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
def test_config(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Ftrl(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.2218, 1.3954, 2.3651, 2.8814], rtol=1e-4, atol=1e-4
)
def test_correctness_with_golden(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.0034, -0.0077, -0.0118, -0.0157, -0.0194, -0.023, -0.0263, -0.0294, -0.0325, -0.0354],
[-0.0078, -0.0162, -0.0242, -0.0317, -0.0387, -0.0454, -0.0516, -0.0575, -0.0631, -0.0685],
[-0.0121, -0.0246, -0.0363, -0.0472, -0.0573, -0.0668, -0.0757, -0.0842, -0.0922, -0.0999],
[-0.0164, -0.0328, -0.0481, -0.0623, -0.0753, -0.0875, -0.099, -0.1098, -0.1201, -0.1299]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Ftrl(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Ftrl(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_invalid_initial_accumulator_value(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`initial_accumulator_value` needs to be positive or zero. Received: initial_accumulator_value={invalid_value}.$",
):
Ftrl(initial_accumulator_value=invalid_value)
def test_invalid_learning_rate_power(self):
invalid_value = 0.1
with self.assertRaisesRegex(
ValueError,
f"^`learning_rate_power` needs to be negative or zero. Received: learning_rate_power={invalid_value}.$",
):
Ftrl(learning_rate_power=invalid_value)
def test_invalid_l1_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l1_regularization_strength` needs to be positive or zero. Received: l1_regularization_strength={invalid_value}.$",
):
Ftrl(l1_regularization_strength=invalid_value)
def test_invalid_l2_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l2_regularization_strength` needs to be positive or zero. Received: l2_regularization_strength={invalid_value}.$",
):
Ftrl(l2_regularization_strength=invalid_value)
def test_invalid_l2_shrinkage_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l2_shrinkage_regularization_strength` needs to be positive or zero. Received: l2_shrinkage_regularization_strength={invalid_value}.$",
):
Ftrl(l2_shrinkage_regularization_strength=invalid_value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adamax.py | keras/src/optimizers/adamax.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-order
gradient-based optimization method. Due to its capability of adjusting the
learning rate based on data characteristics, it is well suited to learning
time-variant processes, e.g., speech data with dynamically changing noise
conditions. Default parameters follow those provided in the paper (see
references below).
Initialization:
```python
m = 0 # Initialize initial 1st moment vector
u = 0 # Initialize the exponentially weighted infinity norm
t = 0 # Initialize timestep
```
The update rule for parameter `w` with gradient `g` is described at the end
of section 7.1 of the paper (see the reference section):
```python
t += 1
m = beta1 * m + (1 - beta1) * g
u = max(beta2 * u, abs(g))
current_lr = learning_rate / (1 - beta1 ** t)
w = w - current_lr * m / (u + epsilon)
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamax",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Adamax optimizer has 2 types of variables: momentums (denoted as m) and
exponentially weighted infinity norms (denoted as u).
Args:
var_list: list of model variables to build Adamax variables on.
"""
if self.built:
return
super().build(var_list)
self._m, self._u = self.add_optimizer_variables(
var_list, ["momentum", "norm"]
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
m = self._m[self._get_variable_index(variable)]
u = self._u[self._get_variable_index(variable)]
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), (1 - self.beta_1))
)
self.assign(
u, ops.maximum(ops.multiply(self.beta_2, u), ops.abs(gradient))
)
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, m),
ops.multiply((1 - beta_1_power), ops.add(u, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Adamax.__doc__ = Adamax.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adam.py | keras/src/optimizers/adam.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adam"])
class Adam(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
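A sketch of the update rule implemented in `update_step()` below, written
with the same conventions as the Adamax pseudocode in this module:
```python
t += 1
m = beta_1 * m + (1 - beta_1) * g
v = beta_2 * v + (1 - beta_2) * g**2
alpha = learning_rate * sqrt(1 - beta_2**t) / (1 - beta_1**t)
w = w - alpha * m / (sqrt(v) + epsilon)
```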
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to `1e-7`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond". Defaults
to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adam",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.amsgrad = amsgrad
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied).
Args:
var_list: list of model variables to build Adam variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums, self._velocities = self.add_optimizer_variables(
var_list, ["momentum", "velocity"]
)
if self.amsgrad:
self._velocity_hats = self.add_optimizer_variables(
var_list, "velocity_hat"
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
beta_2_power = ops.power(
ops.cast(self.beta_2, variable.dtype), local_step
)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)
)
self.assign_add(
v,
ops.multiply(
ops.subtract(ops.square(gradient), v), 1 - self.beta_2
),
)
if self.amsgrad:
v_hat = self._velocity_hats[self._get_variable_index(variable)]
self.assign(v_hat, ops.maximum(v_hat, v))
v = v_hat
self.assign_sub(
variable,
ops.divide(
ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon)
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
Adam.__doc__ = Adam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/lion_test.py | keras/src/optimizers/lion_test.py | import numpy as np
import pytest
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.lion import Lion
class LionTest(testing.TestCase):
def test_invalid_beta_1(self):
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=-0.1.",
):
Lion(beta_1=-0.1)
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=0.0.",
):
Lion(beta_1=0.0)
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=1.1.",
):
Lion(beta_1=1.1)
def test_config(self):
optimizer = Lion(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Lion(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Lion()
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.998], [0.997], [0.996], [0.995]],
(1, 10),
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Lion(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Lion(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras.Sequential([keras.layers.Dense(10)])
model.compile(optimizer=Lion(use_ema=True), loss="mse")
x = keras.ops.zeros((1, 5))
y = keras.ops.zeros((1, 10))
model.fit(x, y)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adadelta_test.py | keras/src/optimizers/adadelta_test.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adadelta import Adadelta
class AdadeltaTest(testing.TestCase):
def test_config(self):
optimizer = Adadelta(
learning_rate=0.5,
rho=0.9,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adadelta(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.9993, 1.9993, 2.9993, 3.9993], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adadelta(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adadelta(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adadelta(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adadelta(learning_rate=1.0, rho=0.8, epsilon=1e-6)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.9978], [0.9947], [0.9915], [0.9882], [0.9849]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adadelta(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adadelta(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/muon.py | keras/src/optimizers/muon.py | import re
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Muon"])
class Muon(optimizer.Optimizer):
"""Optimizer that implements the Muon algorithm.
Note that this optimizer should not be used for the following:
1. Embedding layers
2. Final output fully connected layers
3. Any {0,1}-D variables
These should all be optimized using AdamW.
The Muon optimizer uses either the Muon update step or the
AdamW update step, chosen per variable as follows:
- For any variable that isn't 2D, the AdamW step
will be used. This is not configurable.
- If the argument `exclude_embeddings` (defaults to `True`) is set
to `True`, the AdamW step will be used for embedding variables.
- For any variable with a name that matches an expression
listed in the argument `exclude_layers` (a list), the
AdamW step will be used.
- Any other variable uses the Muon step.
Typically, you only need to pass the name of your densely-connected
output layer to `exclude_layers`, e.g.
`exclude_layers=["output_dense"]`.
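Example (a minimal usage sketch; the layer name is hypothetical):
```python
optimizer = keras.optimizers.Muon(exclude_layers=["output_dense"])
model.compile(optimizer=optimizer, loss="mse")
```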
References:
- [Original implementation](https://github.com/KellerJordan/Muon)
- [Liu et al, 2025](https://arxiv.org/abs/2502.16982)
Args:
learning_rate: A float,
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
adam_beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use.
The exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
adam_beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use.
The exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
adam_weight_decay: Float. If set, weight decay is applied when using
the Adam optimizer.
epsilon: A small constant for numerical stability. This is
"epsilon hat" in the Kingma and Ba paper
(in the formula just before Section 2.1),
not the epsilon in Algorithm 1 of the paper.
It is used by the AdamW step. Defaults to `1e-7`.
exclude_layers: List of strings, keywords of variable names to exclude.
Any variable whose path matches one of these expressions
(via `re.search`) will use the AdamW step.
exclude_embeddings: Boolean. If `True`, embedding layers will use the
AdamW step.
muon_a: Float, parameter a of the Muon algorithm.
It is recommended to use the default value.
muon_b: Float, parameter b of the Muon algorithm.
It is recommended to use the default value.
muon_c: Float, parameter c of the Muon algorithm.
It is recommended to use the default value.
adam_lr_ratio: Float, the ratio of the learning rate used by the
AdamW step to the main learning rate.
It is recommended to set it to 1.
momentum: Float, momentum used by the internal SGD.
ns_steps: Integer, number of Newton-Schulz iterations to run.
nesterov: Boolean, whether to use Nesterov-style momentum.
rms_rate: Float. A parameter from https://arxiv.org/abs/2502.16982
that can enhance the stability of Muon, allowing it to use the
same learning rate and weight decay as Adam. Defaults to `0.2`.
Set to `None` to disable this feature.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
adam_beta_1=0.9,
adam_beta_2=0.999,
adam_weight_decay=0.004,
epsilon=1e-7,
weight_decay=0.004,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="muon",
exclude_layers=None,
exclude_embeddings=True,
muon_a=3.4445,
muon_b=-4.7750,
muon_c=2.0315,
adam_lr_ratio=1,
momentum=0.95,
ns_steps=5,
nesterov=True,
rms_rate=0.2,
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.adam_beta_1 = adam_beta_1
self.adam_beta_2 = adam_beta_2
self.epsilon = epsilon
self.muon_a = muon_a
self.muon_b = muon_b
self.muon_c = muon_c
self.adam_lr_ratio = adam_lr_ratio
self.momentum = momentum
self.ns_steps = ns_steps
self.nesterov = nesterov
self.exclude_embeddings = exclude_embeddings
self.exclude_layers = exclude_layers or []
self.adam_weight_decay = adam_weight_decay
self.rms_rate = rms_rate
def _should_use_adamw(self, variable):
        # Muon is applied to 2D weight matrices; for 4D convolutional
        # filters it works well to just flatten their last 3 dimensions.
        # Any {0,1}-D parameters should all be optimized by Adam.
if len(variable.shape) != 2:
return True
if self.exclude_embeddings and "embedding" in variable.path.lower():
return True
for keyword in self.exclude_layers:
if re.search(keyword, variable.path):
return True
return False
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied),
Args:
var_list: list of model variables to build Adam variables on.
"""
if self.built:
return
super().build(var_list)
# Momentums are for both Muon and Adam
self.momentums = [None] * len(var_list)
# Velocities are just for Adam
self.adam_velocities = [None] * len(var_list)
for var in var_list:
if not self._overwrite_variable_with_gradient(var):
self.momentums[self._get_variable_index(var)] = (
self.add_variable_from_reference(
reference_variable=var, name="momentum"
)
)
if self._should_use_adamw(var):
self.adam_velocities[self._get_variable_index(var)] = (
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
def update_step(self, gradient, variable, learning_rate):
variable_index = self._get_variable_index(variable)
m = self.momentums[variable_index]
v = self.adam_velocities[variable_index]
# The presence of the velocity tells us that this variable is for Adam
if v is not None:
            # Note that the AdamW learning rate is scaled by
            # `adam_lr_ratio` relative to the main (Muon) learning rate.
self._adamw_update_step(
gradient, variable, learning_rate * self.adam_lr_ratio, m, v
)
else:
self._muon_update_step(gradient, variable, learning_rate, m)
def _muon_update_step(self, gradient, variable, lr, m):
self.assign_add(m, ops.add(gradient, m * (self.momentum - 1)))
if self.nesterov:
g = ops.add(gradient, self.momentum * m)
else:
g = m
update = self.zeropower_via_newtonschulz5(g, self.ns_steps)
self.assign_sub(variable, self.lr_adjust(lr * update))
def _adamw_update_step(self, gradient, variable, learning_rate, m, v):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
adam_beta_1_power = ops.power(
ops.cast(self.adam_beta_1, variable.dtype), local_step
)
adam_beta_2_power = ops.power(
ops.cast(self.adam_beta_2, variable.dtype), local_step
)
alpha = lr * ops.sqrt(1 - adam_beta_2_power) / (1 - adam_beta_1_power)
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), 1 - self.adam_beta_1)
)
self.assign_add(
v,
ops.multiply(
ops.subtract(ops.square(gradient), v), 1 - self.adam_beta_2
),
)
self.assign_sub(
variable,
ops.divide(
ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon)
),
)
def transpose_last_axis(self, X):
shape = ops.shape(X)
temp_order = list(range(len(shape)))
temp_order[-2] = temp_order[-1]
temp_order[-1] = len(shape) - 2
X = ops.transpose(X, temp_order)
return X
def lr_adjust(self, x):
"""Adjusts learning rate based on the Moonlight implementation.
This method enhances the stability of Muon, allowing it to use the same
learning rate and weight decay as Adam. For details, see
https://arxiv.org/abs/2502.16982.
For a 2D matrix, the update is scaled by `sqrt(max(n, m)) * rms_rate`,
where `n` and `m` are the dimensions of the matrix.
"""
if self.rms_rate is None:
return x
# moonlight version
# https://github.com/MoonshotAI/Moonlight/blob/master/examples/toy_train.py
return x * ops.sqrt(ops.maximum(x.shape[0], x.shape[1])) * self.rms_rate
def zeropower_via_newtonschulz5(self, x, steps: int):
"""We apply the Newton-Schulz iteration to compute matrix G.
We select a quintic iteration that maximizes the slope at zero. This
approach helps minimize steps, even if the iteration doesn't fully
converge across the interval. The result isn't exactly UV^T (from the
SVD of G), but rather an approximation like US'V^T. Despite this
approximation, model performance remains unaffected compared to using
the exact UV^T from the SVD.
"""
shape = ops.shape(x)
assert len(shape) >= 2
a, b, c = self.muon_a, self.muon_b, self.muon_c
if shape[-2] > shape[-1]:
x = self.transpose_last_axis(x)
# Ensure spectral norm is at most 1
x = x / (ops.norm(x, axis=(-2, -1), keepdims=True) + 1e-7)
# Perform the NS iterations
for _ in range(steps):
temp_a = x @ self.transpose_last_axis(x)
temp_b = b * temp_a + c * temp_a @ temp_a
x = a * x + temp_b @ x
if shape[-2] > shape[-1]:
x = self.transpose_last_axis(x)
return x
def _apply_weight_decay(self, variables):
for variable in variables:
if not self._use_weight_decay(variable):
continue
if self._should_use_adamw(variable):
weight_decay_value = self.adam_weight_decay
else:
weight_decay_value = self.weight_decay
if weight_decay_value is None:
continue
wd = ops.cast(weight_decay_value, variable.dtype)
lr = ops.cast(self.learning_rate, variable.dtype)
variable.assign(variable - variable * wd * lr)
def get_config(self):
config = super().get_config()
config.update(
{
"adam_beta_1": self.adam_beta_1,
"adam_beta_2": self.adam_beta_2,
"epsilon": self.epsilon,
"exclude_layers": self.exclude_layers,
"muon_a": self.muon_a,
"muon_b": self.muon_b,
"muon_c": self.muon_c,
"adam_lr_ratio": self.adam_lr_ratio,
"momentum": self.momentum,
"ns_steps": self.ns_steps,
"nesterov": self.nesterov,
"exclude_embeddings": self.exclude_embeddings,
"adam_weight_decay": self.adam_weight_decay,
"rms_rate": self.rms_rate,
}
)
return config
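# A minimal, self-contained usage sketch. The layer names are illustrative
# (the docstring itself suggests `exclude_layers=["output_dense"]`), and
# the `__main__` guard keeps the module import-safe.
if __name__ == "__main__":
    import numpy as np
    import keras

    model = keras.Sequential(
        [
            keras.Input(shape=(8,)),
            # 2D kernel, not excluded -> Muon update step
            keras.layers.Dense(16, name="hidden_dense"),
            # name matches `exclude_layers` -> AdamW update step
            keras.layers.Dense(2, name="output_dense"),
        ]
    )
    optimizer = Muon(learning_rate=1e-3, exclude_layers=["output_dense"])
    model.compile(optimizer=optimizer, loss="mse")
    model.fit(np.ones((4, 8)), np.zeros((4, 2)), verbose=0)
    # Newton-Schulz sketch: the orthogonalized update is roughly
    # orthogonal, i.e. q @ q^T is close to the identity for a wide matrix.
    x = ops.convert_to_tensor(np.random.randn(4, 8).astype("float32"))
    q = optimizer.zeropower_via_newtonschulz5(x, steps=5)
    print(ops.matmul(q, ops.transpose(q)))  # approximately eye(4)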
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/nadam_test.py | keras/src/optimizers/nadam_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.nadam import Nadam
class NadamTest(testing.TestCase):
def test_config(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_build_with_empty_var_list(self):
optimizer = Nadam()
optimizer.build([])
self.assertEqual(optimizer._u_product.dtype, backend.floatx())
def test_single_step(self):
optimizer = Nadam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4686, 1.4686, 2.4686, 3.4686], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281],
[-0.1738, -0.1731, -0.1726, -0.1723, -0.1721, -0.172, -0.1719, -0.1718, -0.1718, -0.1717],
[-0.7115, -0.7103, -0.7096, -0.7092, -0.709, -0.7088, -0.7086, -0.7085, -0.7085, -0.7084],
[-1.2335, -1.2322, -1.2313, -1.2309, -1.2306, -1.2304, -1.2302, -1.2301, -1.23, -1.2299],
[-1.7492, -1.7478, -1.7469, -1.7464, -1.7461, -1.7459, -1.7457, -1.7456, -1.7455, -1.7454]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Nadam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Nadam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/optimizer.py | keras/src/optimizers/optimizer.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.optimizers import base_optimizer
if backend.backend() == "tensorflow":
from keras.src.backend.tensorflow.optimizer import (
TFOptimizer as BackendOptimizer,
)
elif backend.backend() == "torch":
from keras.src.backend.torch.optimizers import (
TorchOptimizer as BackendOptimizer,
)
elif backend.backend() == "jax":
from keras.src.backend.jax.optimizer import JaxOptimizer as BackendOptimizer
else:
class BackendOptimizer(base_optimizer.BaseOptimizer):
pass
@keras_export(["keras.Optimizer", "keras.optimizers.Optimizer"])
class Optimizer(BackendOptimizer, base_optimizer.BaseOptimizer):
pass
Optimizer.__doc__ = base_optimizer.BaseOptimizer.__doc__
base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args
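# Minimal sketch of extending `Optimizer` with a custom update rule. The
# `SimpleSGD` class below is illustrative only: it implements plain
# gradient descent via `update_step`, using the `assign_sub` helper that
# keeps sparse gradient updates working across backends.
if __name__ == "__main__":
    from keras.src import ops

    class SimpleSGD(Optimizer):
        def update_step(self, gradient, variable, learning_rate):
            lr = ops.cast(learning_rate, variable.dtype)
            g = ops.cast(gradient, variable.dtype)
            self.assign_sub(variable, ops.multiply(lr, g))

    v = backend.Variable([1.0, 2.0])
    SimpleSGD(learning_rate=0.5).apply_gradients([(ops.array([1.0, 1.0]), v)])
    print(v.numpy())  # [0.5, 1.5]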
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adagrad.py | keras/src/optimizers/adagrad.py | from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adagrad"])
class Adagrad(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adagrad`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use `1.0`.
        initial_accumulator_value: Floating point value. Starting value for
            the accumulators (per-parameter accumulated squared gradients).
            Must be non-negative.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
def __init__(
self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adagrad",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
initializer = initializers.Constant(self.initial_accumulator_value)
self._accumulators = self.add_optimizer_variables(
var_list, "accumulator", initializer=initializer
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accumulator = self._accumulators[self._get_variable_index(variable)]
self.assign_add(accumulator, ops.square(gradient))
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, gradient),
ops.sqrt(ops.add(accumulator, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"initial_accumulator_value": self.initial_accumulator_value,
"epsilon": self.epsilon,
}
)
return config
Adagrad.__doc__ = Adagrad.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
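# Numerical sketch of the update rule above: with the default
# `initial_accumulator_value=0.1`, a single step with gradient `g` computes
# `var -= lr * g / sqrt(0.1 + g**2 + epsilon)`. Values are illustrative.
if __name__ == "__main__":
    import numpy as np
    from keras.src import backend

    var = backend.Variable([1.0])
    Adagrad(learning_rate=1.0).apply_gradients([(np.array([3.0]), var)])
    expected = 1.0 - 3.0 / np.sqrt(0.1 + 3.0**2 + 1e-7)
    print(var.numpy()[0], expected)  # both ~0.00551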
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adamw.py | keras/src/optimizers/adamw.py | from keras.src.api_export import keras_export
from keras.src.optimizers import adam
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.AdamW"])
class AdamW(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments with an added
method to decay weights per the techniques discussed in the paper,
'Decoupled Weight Decay Regularization' by
[Loshchilov, Hutter et al., 2019](https://arxiv.org/abs/1711.05101).
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the underlying Adam method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates.
Defaults to `0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just
before Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to 1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond".
Defaults to `False`.
{{base_optimizer_keyword_args}}
References:
- [Loshchilov et al., 2019](https://arxiv.org/abs/1711.05101)
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) for `adam`
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
"""
def __init__(
self,
learning_rate=0.001,
weight_decay=0.004,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamw",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if self.weight_decay is None:
raise ValueError(
"Argument `weight_decay` must be a float. Received: "
"weight_decay=None"
)
AdamW.__doc__ = AdamW.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
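# Sketch of the decoupled weight decay: even with a zero gradient, one
# AdamW step shrinks the variable by `learning_rate * weight_decay`, i.e.
# 2.0 * (1 - 1.0 * 0.004) = 1.992 below. Values are illustrative.
if __name__ == "__main__":
    from keras.src import backend, ops

    var = backend.Variable(2.0)
    AdamW(learning_rate=1.0, weight_decay=0.004).apply_gradients(
        [(ops.zeros(()), var)]
    )
    print(var.numpy())  # ~1.992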
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adadelta.py | keras/src/optimizers/adadelta.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads, self._accumulated_delta_vars = (
self.add_optimizer_variables(
var_list, ["accumulated_grad", "accumulated_delta_var"]
)
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(ops.add(x, self.epsilon))
self.assign(
accumulated_grad,
ops.add(
rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad))
),
)
delta_var = ops.negative(
ops.divide(
ops.multiply(rms(accumulated_delta_var), grad),
rms(accumulated_grad),
)
)
self.assign(
accumulated_delta_var,
ops.add(
ops.multiply(rho, accumulated_delta_var),
ops.multiply(1 - rho, ops.square(delta_var)),
),
)
self.assign_add(variable, ops.multiply(lr, delta_var))
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
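# Sketch checking a single step against the rules in `update_step` above:
# accum = rho * accum + (1 - rho) * g**2,
# delta = -sqrt(delta_acc + eps) / sqrt(accum + eps) * g,
# var += lr * delta. The gradient value is arbitrary.
if __name__ == "__main__":
    import numpy as np
    from keras.src import backend

    g, rho, eps, lr = 0.5, 0.95, 1e-7, 1.0
    var = backend.Variable(1.0)
    Adadelta(learning_rate=lr, rho=rho, epsilon=eps).apply_gradients(
        [(np.array(g, dtype="float32"), var)]
    )
    accum = (1 - rho) * g**2
    delta = -np.sqrt(0.0 + eps) / np.sqrt(accum + eps) * g
    print(var.numpy(), 1.0 + lr * delta)  # both ~0.99859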
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/optimizer_sparse_test.py | keras/src/optimizers/optimizer_sparse_test.py | from unittest import mock
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
class ScatterUpdateOptimizer(optimizers.Optimizer):
def __init__(self):
super().__init__(learning_rate=0.001)
def build(self, variables):
if self.built:
return
super().build(variables)
self.momentums = [
self.add_variable_from_reference(v, name="momentum")
for v in variables
]
def update_step(self, grad, variable, learning_rate):
momentum = self.momentums[self._get_variable_index(variable)]
self.assign(momentum, ops.cast(grad, momentum.dtype))
self.assign(variable, ops.cast(grad, variable.dtype))
TEST_CASES = [
{
"testcase_name": "adadelta",
"optimizer_class": optimizers.Adadelta,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "adafactor",
"optimizer_class": optimizers.Adafactor,
"init_kwargs": {"clip_threshold": 0.5},
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "adagrad",
"optimizer_class": optimizers.Adagrad,
"expect_model_sparse_variable_updates": True,
"expect_optimizer_sparse_variable_updates": True,
},
{
"testcase_name": "adam",
"optimizer_class": optimizers.Adam,
},
{
"testcase_name": "adam_amsgrad",
"optimizer_class": optimizers.Adam,
"init_kwargs": {"amsgrad": True},
},
{
"testcase_name": "adamax",
"optimizer_class": optimizers.Adamax,
},
{
"testcase_name": "adamw",
"optimizer_class": optimizers.AdamW,
},
{
"testcase_name": "adamw_amsgrad",
"optimizer_class": optimizers.AdamW,
"init_kwargs": {"amsgrad": True},
},
{
"testcase_name": "ftrl",
"optimizer_class": optimizers.Ftrl,
},
{
"testcase_name": "lion",
"optimizer_class": optimizers.Lion,
},
{
"testcase_name": "loss_scale_optimizer_sgd",
"optimizer_class": lambda: optimizers.LossScaleOptimizer(
optimizers.SGD(learning_rate=0.5)
),
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "nadam",
"optimizer_class": optimizers.Nadam,
},
{
"testcase_name": "rmsprop",
"optimizer_class": optimizers.RMSprop,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "rmsprop_momentum",
"optimizer_class": optimizers.RMSprop,
"init_kwargs": {"momentum": 0.05},
},
{
"testcase_name": "rmsprop_momentum_centered",
"optimizer_class": optimizers.RMSprop,
"init_kwargs": {"momentum": 0.05, "centered": True},
},
{
"testcase_name": "sgd",
"optimizer_class": optimizers.SGD,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "sgd_momentum",
"optimizer_class": optimizers.SGD,
"init_kwargs": {"momentum": 0.05},
},
{
"testcase_name": "sgd_momentum_nesterov",
"optimizer_class": optimizers.SGD,
"init_kwargs": {"momentum": 0.05, "nesterov": True},
},
{
"testcase_name": "scatter_update",
"optimizer_class": ScatterUpdateOptimizer,
"expect_model_sparse_variable_updates": True,
"expect_optimizer_sparse_variable_updates": True,
},
]
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
class OptimizerSparseTest(testing.TestCase):
@parameterized.named_parameters(TEST_CASES)
def test_sparse_gradients(
self,
optimizer_class,
init_kwargs={},
expect_model_sparse_variable_updates=False,
expect_optimizer_sparse_variable_updates=False,
):
# This test verifies that:
# - Optimizers use Keras ops everywhere instead of native operators
# (e.g. `ops.add()` instead of `+`) where sparse gradients are handled
# - The used ops handle sparse gradients
# - Optimizers use `self.assign/assign_add/assign_sub` instead of
# calling the method on the variable directly. Otherwise, the sparse
# updates are densified before being applied.
# - For some optimizers, a sparse gradient actually results in a sparse
# variable update as per `expect_model_sparse_variable_updates` and
# `expect_optimizer_sparse_variable_updates`
model_variable = backend.Variable(initializer="ones", shape=(5, 10))
optimizer = optimizer_class(**init_kwargs)
# Mocking "tensorflow.Variable" won't work as it gets substituted with
# the resource variable class.
if backend.backend() == "tensorflow":
import tensorflow as tf
grad = tf.IndexedSlices(0.5 * ops.ones((3, 10)), (0, 2, 4), (5, 10))
sparse_class = tf.IndexedSlices
variable_class = model_variable._value.__class__
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad = jax_sparse.BCOO(
(0.5 * ops.ones((3, 10)), ((0,), (2,), (4,))), shape=(5, 10)
)
sparse_class = jax_sparse.JAXSparse
variable_class = model_variable.__class__
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
optimizer_to_patch = (
optimizer.inner_optimizer
if isinstance(optimizer, optimizers.LossScaleOptimizer)
else optimizer
)
model_sparse_variable_updates = False
optimizer_sparse_variable_updates = False
def mock_optimizer_assign(variable, value):
nonlocal model_sparse_variable_updates
nonlocal optimizer_sparse_variable_updates
if isinstance(variable, backend.Variable):
variable = variable._value
if isinstance(value, sparse_class):
if variable is model_variable._value:
model_sparse_variable_updates = True
elif any(variable is v._value for v in optimizer.variables):
optimizer_sparse_variable_updates = True
def mock_variable_assign(variable, value):
# Make an exception for scalar variables
if len(variable.shape):
pytest.fail(
"Optimizer is calling `assign`, `assign_add` or "
"`assign_sub` directly on a variable. Use "
"`self.assign/assign_add/assign_sub(variable, value)` "
"instead to support sparse updates."
)
# patch "_apply_weight_decay" to exclude this special case.
# patch the optimizer "assign" methods to detect sparse updates.
# patch the tf.Variable "assign" methods to detect direct assign calls.
with (
mock.patch.object(
optimizer_to_patch, "_apply_weight_decay", autospec=True
),
mock.patch.object(
optimizer_to_patch, "assign", autospec=True
) as optimizer_assign,
mock.patch.object(
optimizer_to_patch, "assign_add", autospec=True
) as optimizer_assign_add,
mock.patch.object(
optimizer_to_patch, "assign_sub", autospec=True
) as optimizer_assign_sub,
mock.patch.object(
variable_class, "assign", autospec=True
) as variable_assign,
mock.patch.object(
variable_class, "assign_add", autospec=True
) as variable_assign_add,
mock.patch.object(
variable_class, "assign_sub", autospec=True
) as variable_assign_sub,
):
optimizer_assign.side_effect = mock_optimizer_assign
optimizer_assign_add.side_effect = mock_optimizer_assign
optimizer_assign_sub.side_effect = mock_optimizer_assign
variable_assign.side_effect = mock_variable_assign
variable_assign_add.side_effect = mock_variable_assign
variable_assign_sub.side_effect = mock_variable_assign
optimizer.apply([grad], [model_variable])
self.assertEqual(
model_sparse_variable_updates, expect_model_sparse_variable_updates
)
self.assertEqual(
optimizer_sparse_variable_updates,
expect_optimizer_sparse_variable_updates,
)
@parameterized.named_parameters(TEST_CASES)
def test_sparse_correctness(
self, optimizer_class, init_kwargs={}, **kwargs
):
# This test verifies that applying a sparse gradient gives the same
# numerical results as the same dense gradient.
optimizer_sparse = optimizer_class(**init_kwargs)
optimizer_dense = optimizer_class(**init_kwargs)
var_sparse = backend.Variable(initializer="ones", shape=(5, 3, 2))
var_dense = backend.Variable(initializer="ones", shape=(5, 3, 2))
stateless = backend.backend() == "jax"
if stateless:
optimizer_sparse.build([var_sparse])
optimizer_dense.build([var_dense])
optimizer_sparse_vars = optimizer_sparse.variables
optimizer_dense_vars = optimizer_dense.variables
var_sparse_values = [var_sparse.value]
var_dense_values = [var_dense.value]
for i in range(5):
if backend.backend() == "tensorflow":
import tensorflow as tf
grad_sparse = tf.IndexedSlices(
values=ops.ones((3, 3, 2)) * (10.0 - i),
indices=(0, 2, 4),
dense_shape=(5, 3, 2),
)
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad_sparse = jax_sparse.BCOO(
(ops.ones((3, 3, 2)) * (10.0 - i), ((0,), (2,), (4,))),
shape=(5, 3, 2),
)
else:
self.fail(
f"Sparse is unsupported with backend {backend.backend()}"
)
grad_dense = ops.convert_to_tensor(grad_sparse, sparse=False)
if stateless:
(
var_sparse_values,
optimizer_sparse_vars,
) = optimizer_sparse.stateless_apply(
optimizer_sparse_vars, [grad_sparse], var_sparse_values
)
(
var_dense_values,
optimizer_dense_vars,
) = optimizer_dense.stateless_apply(
optimizer_dense_vars, [grad_dense], var_dense_values
)
self.assertAllClose(var_sparse_values[0], var_dense_values[0])
else:
optimizer_sparse.apply([grad_sparse], [var_sparse])
optimizer_dense.apply([grad_dense], [var_dense])
self.assertAllClose(var_sparse.value, var_dense.value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adam_test.py | keras/src/optimizers/adam_test.py | import numpy as np
import pytest
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adam import Adam
class AdamTest(testing.TestCase):
def test_config(self):
optimizer = Adam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adam(amsgrad=True)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras.Sequential([keras.layers.Dense(10)])
model.compile(optimizer=Adam(use_ema=True), loss="mse")
x = keras.ops.zeros((1, 5))
y = keras.ops.zeros((1, 10))
model.fit(x, y)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The IndexedSlices test can only run with TF backend.",
)
def test_clipnorm_indexed_slices(self):
# https://github.com/keras-team/keras/issues/18985
model = keras.Sequential(
[
keras.layers.Embedding(10, 4),
keras.layers.Flatten(),
keras.layers.Dense(2),
]
)
model.compile(optimizer=Adam(clipnorm=100), loss="mse")
x = keras.ops.ones((8, 5))
y = keras.ops.zeros((8, 2))
model.fit(x, y, verbose=0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/rmsprop_test.py | keras/src/optimizers/rmsprop_test.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.rmsprop import RMSprop
class RMSpropTest(testing.TestCase):
def test_config(self):
optimizer = RMSprop(
learning_rate=0.5,
rho=0.8,
momentum=0.05,
epsilon=1e-6,
centered=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = RMSprop(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.5811, 0.4189, 1.4189, 2.4189], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = RMSprop(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = RMSprop(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = RMSprop(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = RMSprop(centered=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.9967], [0.9933], [0.9908], [0.9885], [0.9864]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = RMSprop(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = RMSprop(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adafactor_test.py | keras/src/optimizers/adafactor_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.adafactor import Adafactor
class AdafactorTest(testing.TestCase):
def test_config(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
self.run_class_serialization_test(optimizer)
def test_single_step_1d(self):
optimizer = Adafactor(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.3693, 0.6307, 1.6307, 2.6307], rtol=1e-4, atol=1e-4
)
def test_single_step_2d(self):
optimizer = Adafactor(learning_rate=0.5)
grads = np.array([[1.0, 6.0], [7.0, 2.0]])
vars = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [[0.7007, -0.0081], [1.2492, 3.4407]], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
np.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55],
[0.3031, 0.3026, 0.3025, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024],
[0.1671, 0.1665, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663],
[0.0923, 0.0916, 0.0915, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914],
[0.0554, 0.0548, 0.0546, 0.0546, 0.0546, 0.0546, 0.0546, 0.0545, 0.0545, 0.0545]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adafactor(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adafactor(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/nadam.py | keras/src/optimizers/nadam.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Nadam"])
class Nadam(optimizer.Optimizer):
"""Optimizer that implements the Nadam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to `1e-7`.
{{base_optimizer_keyword_args}}
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="nadam",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Nadam optimizer has 2 types of variables: momentums and velocities.
Args:
var_list: list of model variables to build Nadam variables on.
"""
if self.built:
return
if var_list:
dtype = var_list[0].dtype
else:
dtype = backend.floatx()
super().build(var_list)
self._momentums, self._velocities = self.add_optimizer_variables(
var_list, ["momentum", "velocity"]
)
self._u_product = backend.Variable(1.0, dtype=dtype)
def _backend_update_step(self, grads, trainable_variables, learning_rate):
dtype = self._u_product.dtype
self.assign(
self._u_product,
self._u_product
* self.beta_1
* (
1.0
- 0.5 * ops.power(0.96, ops.cast(self.iterations + 1, dtype))
),
)
super()._backend_update_step(grads, trainable_variables, learning_rate)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
var_dtype = variable.dtype
lr = ops.cast(learning_rate, var_dtype)
gradient = ops.cast(gradient, var_dtype)
local_step = ops.cast(self.iterations + 1, var_dtype)
next_step = ops.cast(self.iterations + 2, var_dtype)
decay = ops.cast(0.96, var_dtype)
beta_1 = ops.cast(self.beta_1, var_dtype)
beta_2 = ops.cast(self.beta_2, var_dtype)
u_t = beta_1 * (1.0 - 0.5 * (ops.power(decay, local_step)))
u_t_1 = beta_1 * (1.0 - 0.5 * (ops.power(decay, next_step)))
u_product_t = ops.cast(self._u_product, var_dtype)
u_product_t_1 = u_product_t * u_t_1
beta_2_power = ops.power(beta_2, local_step)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), (1 - beta_1))
)
self.assign_add(
v, ops.multiply(ops.subtract(ops.square(gradient), v), (1 - beta_2))
)
m_hat = ops.add(
ops.divide(ops.multiply(u_t_1, m), 1 - u_product_t_1),
ops.divide(ops.multiply(1 - u_t, gradient), 1 - u_product_t),
)
v_hat = ops.divide(v, (1 - beta_2_power))
self.assign_sub(
variable,
ops.divide(
ops.multiply(m_hat, lr), ops.add(ops.sqrt(v_hat), self.epsilon)
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Nadam.__doc__ = Nadam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
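# Minimal usage sketch: like any Keras optimizer, Nadam accepts a
# `LearningRateSchedule` in place of a fixed float. Values are
# illustrative.
if __name__ == "__main__":
    import keras

    schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.01, decay_steps=100, decay_rate=0.9
    )
    opt = Nadam(learning_rate=schedule)
    v = backend.Variable([1.0, 2.0])
    opt.apply_gradients([(ops.array([0.1, 0.1]), v)])
    print(v.numpy())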
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/lion.py | keras/src/optimizers/lion.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Lion"])
class Lion(optimizer.Optimizer):
"""Optimizer that implements the Lion algorithm.
The Lion optimizer is a stochastic-gradient-descent method that uses the
sign operator to control the magnitude of the update, unlike other adaptive
optimizers such as Adam that rely on second-order moments. This makes
Lion more memory-efficient as it only keeps track of the momentum. According
to the authors (see reference), its performance gain over Adam grows with
the batch size. Because the update of Lion is produced through the sign
operation, resulting in a larger norm, a suitable learning rate for Lion is
typically 3-10x smaller than that for AdamW. The weight decay for Lion
should in turn be 3-10x larger than that for AdamW to maintain a
similar strength (lr * wd).
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
rate to combine the current gradient and the 1st moment estimate.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimate. Defaults to
`0.99`.
{{base_optimizer_keyword_args}}
References:
- [Chen et al., 2023](http://arxiv.org/abs/2302.06675)
- [Authors' implementation](
http://github.com/google/automl/tree/master/lion)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.99,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="lion",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
if beta_1 <= 0 or beta_1 > 1:
raise ValueError(
"Argument `beta_1` must be in the [0, 1] range. Otherwise, the "
f"optimizer degenerates to SignSGD. Received: beta_1={beta_1}."
)
def build(self, var_list):
"""Initialize optimizer variables.
Lion optimizer has one variable `momentums`.
Args:
var_list: list of model variables to build Lion variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums = self.add_optimizer_variables(var_list, "momentum")
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
beta_1 = ops.cast(self.beta_1, variable.dtype)
beta_2 = ops.cast(self.beta_2, variable.dtype)
m = self._momentums[self._get_variable_index(variable)]
self.assign_sub(
variable,
ops.multiply(
lr,
ops.sign(
ops.add(
ops.multiply(m, beta_1),
ops.multiply(gradient, (1.0 - beta_1)),
)
),
),
)
self.assign(
m,
ops.add(
ops.multiply(m, beta_2), ops.multiply(gradient, (1.0 - beta_2))
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
}
)
return config
Lion.__doc__ = Lion.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
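# Sketch of the sign-based update: on the first step the variable moves by
# exactly `learning_rate` against the sign of the gradient, regardless of
# the gradient's magnitude. Values are illustrative.
if __name__ == "__main__":
    from keras.src import backend

    v = backend.Variable([1.0, 1.0])
    Lion(learning_rate=0.1).apply_gradients([(ops.array([0.001, -500.0]), v)])
    print(v.numpy())  # ~[0.9, 1.1]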
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/ftrl.py | keras/src/optimizers/ftrl.py | from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Ftrl"])
class Ftrl(optimizer.Optimizer):
r"""Optimizer that implements the FTRL algorithm.
"Follow The Regularized Leader" (FTRL) is an optimization algorithm
developed at Google for click-through rate prediction in the early 2010s. It
is most suitable for shallow models with large and sparse feature spaces.
The algorithm is described by
[McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).
The Keras version has support for both online L2 regularization
(the L2 regularization described in the paper
above) and shrinkage-type L2 regularization
(which is the addition of an L2 penalty to the loss function).
Initialization:
```python
n = 0
sigma = 0
z = 0
```
Update rule for one variable `w`:
```python
prev_n = n
n = n + g ** 2
sigma = (n ** -lr_power - prev_n ** -lr_power) / lr
z = z + g - sigma * w
if abs(z) < lambda_1:
w = 0
else:
w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)
```
Notation:
- `lr` is the learning rate
- `g` is the gradient for the variable
- `lambda_1` is the L1 regularization strength
- `lambda_2` is the L2 regularization strength
- `lr_power` is the power to scale n.
Check the documentation for the `l2_shrinkage_regularization_strength`
parameter for more details when shrinkage is enabled, in which case gradient
is replaced with a gradient with shrinkage.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
        learning_rate_power: A float value, must be less than or equal to
            zero. Controls how the learning rate decreases during training.
            Use zero for a fixed learning rate.
initial_accumulator_value: The starting value for accumulators. Only
zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_shrinkage_regularization_strength: A float value, must be greater
than or equal to zero. This differs from L2 above in that the L2
above is a stabilization penalty, whereas this L2 shrinkage is a
            magnitude penalty. When input is sparse, shrinkage will only
            happen on the active weights.
beta: A float value, representing the beta value from the paper.
Defaults to `0.0`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
l2_shrinkage_regularization_strength=0.0,
beta=0.0,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="ftrl",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if initial_accumulator_value < 0.0:
raise ValueError(
"`initial_accumulator_value` needs to be positive or zero. "
"Received: initial_accumulator_value="
f"{initial_accumulator_value}."
)
if learning_rate_power > 0.0:
raise ValueError(
"`learning_rate_power` needs to be negative or zero. Received: "
f"learning_rate_power={learning_rate_power}."
)
if l1_regularization_strength < 0.0:
raise ValueError(
"`l1_regularization_strength` needs to be positive or zero. "
"Received: l1_regularization_strength="
f"{l1_regularization_strength}."
)
if l2_regularization_strength < 0.0:
raise ValueError(
"`l2_regularization_strength` needs to be positive or zero. "
"Received: l2_regularization_strength="
f"{l2_regularization_strength}."
)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"`l2_shrinkage_regularization_strength` needs to be positive "
"or zero. Received: l2_shrinkage_regularization_strength"
f"={l2_shrinkage_regularization_strength}."
)
self.learning_rate_power = learning_rate_power
self.initial_accumulator_value = initial_accumulator_value
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength
)
self.beta = beta
def build(self, var_list):
"""Initialize optimizer variables.
Args:
var_list: list of model variables to build Ftrl variables on.
"""
if self.built:
return
super().build(var_list)
accumulator_initializer = initializers.Constant(
self.initial_accumulator_value,
)
self._accumulators, self._linears = self.add_optimizer_variables(
var_list,
["accumulator", "linear"],
initializer=[accumulator_initializer, "zeros"],
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accum = self._accumulators[self._get_variable_index(variable)]
linear = self._linears[self._get_variable_index(variable)]
lr_power = self.learning_rate_power
l2_reg = self.l2_regularization_strength
l2_reg = l2_reg + self.beta / (2.0 * lr)
grad_to_use = ops.add(
gradient,
ops.multiply(
2 * self.l2_shrinkage_regularization_strength, variable
),
)
new_accum = ops.add(accum, ops.square(gradient))
self.assign_add(
linear,
ops.subtract(
grad_to_use,
ops.multiply(
ops.divide(
ops.subtract(
ops.power(new_accum, -lr_power),
ops.power(accum, -lr_power),
),
lr,
),
variable,
),
),
)
quadratic = ops.add(
ops.divide(ops.power(new_accum, (-lr_power)), lr), 2 * l2_reg
)
linear_clipped = ops.clip(
linear,
-self.l1_regularization_strength,
self.l1_regularization_strength,
)
self.assign(
variable,
ops.divide(ops.subtract(linear_clipped, linear), quadratic),
)
self.assign(accum, new_accum)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate_power": self.learning_rate_power,
"initial_accumulator_value": self.initial_accumulator_value,
"l1_regularization_strength": self.l1_regularization_strength,
"l2_regularization_strength": self.l2_regularization_strength,
"l2_shrinkage_regularization_strength": self.l2_shrinkage_regularization_strength, # noqa: E501
"beta": self.beta,
}
)
return config
Ftrl.__doc__ = Ftrl.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
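# Sketch of FTRL's induced sparsity: with a large
# `l1_regularization_strength`, the accumulated linear term stays inside
# the clipping range and weights are set exactly to zero. Values are
# illustrative.
if __name__ == "__main__":
    from keras.src import backend

    v = backend.Variable([0.8, -0.3])
    opt = Ftrl(learning_rate=0.1, l1_regularization_strength=10.0)
    opt.apply_gradients([(ops.array([0.5, 0.2]), v)])
    print(v.numpy())  # [0., 0.]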
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/lamb_test.py | keras/src/optimizers/lamb_test.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.lamb import Lamb
class LambTest(testing.TestCase):
def test_config(self):
optimizer = Lamb(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Lamb(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.3693, 0.6306, 1.6306, 2.6306], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Lamb()
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Lamb(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Lamb(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
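# A quick arithmetic check of the weight-decay goldens above (illustrative,
# not part of the test suite). With zero gradients and learning_rate=1.0,
# each `apply_gradients` call shrinks a decayed variable by a factor of
# (1 - weight_decay), and `var1` is passed through all three optimizers:
assert abs(2.0 * (1.0 - 0.004) ** 3 - 1.9760959) < 1e-6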
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/__init__.py | keras/src/optimizers/__init__.py | from keras.src.api_export import keras_export
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.muon import Muon
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
Optimizer,
Adam,
SGD,
RMSprop,
Adadelta,
AdamW,
Adagrad,
Adamax,
Adafactor,
Nadam,
Ftrl,
Lion,
    LossScaleOptimizer,
    Muon,
}
ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS}
@keras_export("keras.optimizers.serialize")
def serialize(optimizer):
"""Returns the optimizer configuration as a Python dict.
Args:
optimizer: An `Optimizer` instance to serialize.
Returns:
Python dict which contains the configuration of the optimizer.
"""
return serialization_lib.serialize_keras_object(optimizer)
@keras_export("keras.optimizers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras optimizer object via its configuration.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras Optimizer instance.
"""
# Make deserialization case-insensitive for built-in optimizers.
if config["class_name"].lower() in ALL_OBJECTS_DICT:
config["class_name"] = config["class_name"].lower()
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.optimizers.get")
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of:
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
Returns:
A Keras Optimizer instance.
"""
if identifier is None:
return None
elif isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": identifier, "config": {}}
obj = deserialize(config)
else:
obj = identifier
if isinstance(obj, Optimizer):
return obj
raise ValueError(f"Could not interpret optimizer identifier: {identifier}")
# We will add this temporarily so that tensorflow packages that depend on
# estimators will continue to import (there are a large number). Note that
# Keras 3 will not work with the estimators API.
@keras_export(
[
"keras.optimizers.legacy.Adagrad",
"keras.optimizers.legacy.Adam",
"keras.optimizers.legacy.Ftrl",
"keras.optimizers.legacy.RMSprop",
"keras.optimizers.legacy.SGD",
"keras.optimizers.legacy.Optimizer",
]
)
class LegacyOptimizerWarning:
def __init__(self, *args, **kwargs):
raise ImportError(
"`keras.optimizers.legacy` is not supported in Keras 3. When using "
"`tf.keras`, to continue using a `tf.keras.optimizers.legacy` "
"optimizer, you can install the `tf_keras` package (Keras 2) and "
"set the environment variable `TF_USE_LEGACY_KERAS=True` to "
"configure TensorFlow to use `tf_keras` when accessing `tf.keras`."
)
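# A minimal round-trip sketch for the helpers above (illustrative usage
# that would live in user code, not in this module; assumes the public
# `keras` namespace is importable):
#
#     import keras
#
#     opt = keras.optimizers.get("adam")     # case-insensitive string lookup
#     cfg = keras.optimizers.serialize(opt)  # dict with "class_name", "config", etc.
#     clone = keras.optimizers.deserialize(cfg)
#     assert isinstance(clone, keras.optimizers.Adam)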
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/sgd_test.py | keras/src/optimizers/sgd_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.sgd import SGD
class SGDTest(testing.TestCase):
def test_config(self):
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = SGD(learning_rate=0.5)
self.assertEqual(len(optimizer.variables), 2)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.build([vars])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, -1.0, -0.5, 3.0], rtol=1e-4, atol=1e-4)
self.assertEqual(len(optimizer.variables), 2)
self.assertEqual(optimizer.variables[0], 1)
self.assertEqual(optimizer.variables[1], 0.5)
def test_invalid_momentum(self):
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=-1.0)
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=2.0)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999,
0.9999, 0.9999], [0.9989, 0.9979, 0.9969, 0.9959, 0.9949, 0.9939,
0.9929, 0.9919, 0.9909, 0.9899], [0.9979, 0.9959, 0.9939, 0.9919,
0.9899, 0.9879, 0.9859, 0.9839, 0.9819, 0.9799], [0.9969, 0.9939,
0.9909, 0.9879, 0.9849, 0.9819, 0.9789, 0.9759, 0.9729, 0.9699],
[0.9959, 0.9919, 0.9879, 0.9839, 0.9799, 0.9759, 0.9719, 0.9679,
0.9639, 0.9599]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/sgd.py | keras/src/optimizers/sgd.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.01`.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. 0 is vanilla
gradient descent. Defaults to `0.0`.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="SGD",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
raise ValueError("`momentum` must be a float between [0, 1].")
self.momentum = momentum
self.nesterov = nesterov
def build(self, variables):
"""Initialize optimizer variables.
        SGD optimizer has one set of variables, `momentums`, which is only
        created when `self.momentum` is not 0.
        Args:
            variables: list of model variables to build SGD variables on.
"""
if self.built:
return
super().build(variables)
self.momentums = []
if self.momentum != 0:
self.momentums = self.add_optimizer_variables(variables, "momentum")
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = None
if self.momentum != 0:
m = self.momentums[self._get_variable_index(variable)]
if m is not None:
momentum = ops.cast(self.momentum, variable.dtype)
self.assign(
m,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
if self.nesterov:
self.assign_add(
variable,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
else:
self.assign_add(variable, m)
else:
self.assign_sub(variable, ops.multiply(gradient, learning_rate))
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
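# A minimal NumPy sketch of the three update rules documented above
# (illustrative only, not from the Keras sources; it mirrors `update_step`):
import numpy as np

def sgd_step(w, g, velocity=None, lr=0.01, momentum=0.0, nesterov=False):
    if momentum == 0.0:
        return w - lr * g, velocity  # vanilla gradient descent
    velocity = momentum * velocity - lr * g
    if nesterov:
        return w + momentum * velocity - lr * g, velocity
    return w + velocity, velocity

w, v = np.array([1.0, 2.0]), np.zeros(2)
w, v = sgd_step(w, np.array([0.5, 0.5]), velocity=v, momentum=0.9)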
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/loss_scale_optimizer_test.py | keras/src/optimizers/loss_scale_optimizer_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.sgd import SGD
class LossScaleOptimizerTest(testing.TestCase):
def _skip_test_for_stateless(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest(
"LossScaleOptimizer must use stateless_apply with JAX."
)
if stateless and backend.backend() == "tensorflow":
self.skipTest(
"stateless_apply is not supported with the TF backend."
)
def test_config(self):
inner_optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
optimizer = LossScaleOptimizer(inner_optimizer)
self.run_class_serialization_test(optimizer)
def test_apply_with_no_vars(self):
self._skip_test_for_stateless(False)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
optimizer.apply(grads)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step_with_inner_loss_scale(self, stateless):
self._skip_test_for_stateless(stateless)
# Ensure that the inner loss scale does not interfere with the update.
inner_optimizer = SGD(learning_rate=0.5, loss_scale_factor=100)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_infinite_step(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(vars, [[1.0, 2.0, 3.0, 4.0]], rtol=1e-4, atol=1e-4)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step_with_overwrite(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
vars[0].overwrite_with_gradient = True
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(vars, grads)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_downscaling(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer, initial_scale=400.0)
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
opt_var_values = [v.value for v in optimizer.variables]
grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
for _ in range(4):
if stateless:
_, opt_var_values = optimizer.stateless_apply(
opt_var_values, grads, [v.value for v in vars]
)
for ref_v, v in zip(optimizer.variables, opt_var_values):
ref_v.assign(v)
else:
optimizer.apply(grads, vars)
self.assertAllClose(optimizer.scale_loss(1.0), 25.0)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_upscaling(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(
inner_optimizer,
initial_scale=2.0,
dynamic_growth_steps=2,
)
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
opt_var_values = [v.value for v in optimizer.variables]
grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
for _ in range(8):
if stateless:
_, opt_var_values = optimizer.stateless_apply(
opt_var_values, grads, [v.value for v in vars]
)
for ref_v, v in zip(optimizer.variables, opt_var_values):
ref_v.assign(v)
else:
optimizer.apply(grads, vars)
self.assertAllClose(optimizer.scale_loss(1.0), 32.0)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_iterations_update(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
opt_var_values = [v.value for v in optimizer.variables]
grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
self.assertEqual(optimizer.iterations.value, 0)
for i in range(3):
if stateless:
_, opt_var_values = optimizer.stateless_apply(
opt_var_values, grads, [v.value for v in vars]
)
for ref_v, v in zip(optimizer.variables, opt_var_values):
ref_v.assign(v)
else:
optimizer.apply(grads, vars)
self.assertEqual(optimizer.iterations.value, i + 1)
def test_serialization(self):
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(
inner_optimizer,
initial_scale=3.0,
dynamic_growth_steps=2,
name="test_opt",
)
config = optimizer.get_config()
self.assertLen(config, 4)
self.assertEqual(config["name"], "test_opt")
self.assertEqual(config["initial_scale"], 3.0)
self.assertEqual(config["dynamic_growth_steps"], 2)
self.assertIn("inner_optimizer", config)
LossScaleOptimizer.from_config(config)
def test_init_dynamic_arg(self):
inner_optimizer = SGD(learning_rate=0.5)
# dynamic=True is supported
LossScaleOptimizer(inner_optimizer, dynamic=True)
# dynamic=False is not supported
with self.assertRaisesRegex(ValueError, "set `loss_scale_factor`"):
LossScaleOptimizer(inner_optimizer, dynamic=False)
def test_init_unsupported_arg(self):
inner_optimizer = SGD(learning_rate=0.5)
with self.assertRaisesRegex(ValueError, "arguments: `foo`, `bar`"):
LossScaleOptimizer(inner_optimizer, foo=True, bar=3)
@parameterized.named_parameters(
("weight_decay", "weight_decay", 0.5),
("clipnorm", "clipnorm", 0.5),
("global_clipnorm", "global_clipnorm", 0.5),
("clipvalue", "clipvalue", 0.5),
("use_ema", "use_ema", True),
("ema_momentum", "ema_momentum", 0.5),
("ema_overwrite_frequency", "ema_overwrite_frequency", 2),
("loss_scale_factor", "loss_scale_factor", 0.5),
("gradient_accumulation_steps", "gradient_accumulation_steps", 2),
)
def test_init_base_optimizer_unsupported_args(self, arg_name, arg_value):
inner_optimizer = SGD(learning_rate=0.5)
with self.assertRaisesRegex(ValueError, "on the `inner_optimizer`"):
LossScaleOptimizer(inner_optimizer, **{arg_name: arg_value})
def test_deserialization_backwards_compatibility(self):
# Test deserializing with a config that has all the unsupported
# arguments from the base optimizer (which are no longer serialized)
config = {
"name": "loss_scale_optimizer",
"weight_decay": None,
"clipnorm": None,
"global_clipnorm": None,
"clipvalue": None,
"use_ema": False,
"ema_momentum": 0.99,
"ema_overwrite_frequency": None,
"loss_scale_factor": None,
"gradient_accumulation_steps": None,
"inner_optimizer": {
"module": "keras.optimizers",
"class_name": "SGD",
"config": {
"name": "SGD",
"learning_rate": 0.5,
"weight_decay": None,
"clipnorm": None,
"global_clipnorm": None,
"clipvalue": None,
"use_ema": False,
"ema_momentum": 0.99,
"ema_overwrite_frequency": None,
"loss_scale_factor": None,
"gradient_accumulation_steps": None,
"momentum": 0.0,
"nesterov": False,
},
"registered_name": None,
},
"initial_scale": 2.0,
"dynamic_growth_steps": 2,
}
LossScaleOptimizer.from_config(config)
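# A quick arithmetic check of the dynamic-scaling goldens above
# (illustrative, not part of the test suite). The scale is halved on every
# step with non-finite gradients and doubled after every
# `dynamic_growth_steps` consecutive finite steps:
assert 400.0 / 2**4 == 25.0         # test_downscaling: 4 non-finite steps
assert 2.0 * 2 ** (8 // 2) == 32.0  # test_upscaling: 8 finite steps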
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adafactor.py | keras/src/optimizers/adafactor.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adafactor"])
class Adafactor(optimizer.Optimizer):
"""Optimizer that implements the Adafactor algorithm.
    Adafactor is commonly used in NLP tasks, and has the advantage of
    requiring less memory because it only stores partial information about
    previous gradients.
    The default argument setup is based on the original paper (see reference).
    When gradients have more than 2 dimensions, the Adafactor optimizer
    reduces over each of the last 2 dimensions separately in its accumulator
    variables.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_2_decay: float, defaults to -0.8. The decay rate of `beta_2`.
epsilon_1: float, defaults to 1e-30. A small offset to keep denominator
away from 0.
epsilon_2: float, defaults to 1e-3. A small offset to avoid learning
rate becoming too small by time.
clip_threshold: float, defaults to 1.0. Clipping threshold. This is a
part of Adafactor algorithm, independent from `clipnorm`,
`clipvalue`, and `global_clipnorm`.
relative_step: bool, defaults to `True`. If `learning_rate` is a
constant and `relative_step=True`, learning rate will be adjusted
based on current iterations. This is a default learning rate decay
in Adafactor.
{{base_optimizer_keyword_args}}
Reference:
- [Shazeer, Noam et al., 2018](https://arxiv.org/abs/1804.04235).
"""
def __init__(
self,
learning_rate=0.001,
beta_2_decay=-0.8,
epsilon_1=1e-30,
epsilon_2=1e-3,
clip_threshold=1.0,
relative_step=True,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adafactor",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_2_decay = beta_2_decay
self.epsilon_1 = epsilon_1
self.epsilon_2 = epsilon_2
self.clip_threshold = clip_threshold
self.relative_step = relative_step
def build(self, var_list):
"""Initialize optimizer variables.
        Adafactor optimizer has 3 types of variables: factored row
        accumulators `_r`, factored column accumulators `_c`, and
        velocities `_v`.
        Args:
            var_list: list of model variables to build Adafactor variables on.
"""
if self.built:
return
super().build(var_list)
self._r = []
self._c = []
self._v = []
for var in var_list:
if len(var.shape) < 2:
# Don't factor if variable is of dimension < 2, but we still
                # need to create dummy variables as placeholders.
self._r.append(
backend.Variable(0, name=var.name, trainable=False)
)
self._c.append(
backend.Variable(0, name=var.name, trainable=False)
)
elif self._overwrite_variable_with_gradient(var):
self._r.append(None)
self._c.append(None)
else:
# Always factor the last 2 dimensions.
r_shape = var.shape[:-1]
c_shape = var.shape[:-2] + (var.shape[-1],)
self._r.append(
self.add_variable(
shape=r_shape,
dtype=var.dtype,
name=var.name,
)
)
self._c.append(
self.add_variable(
shape=c_shape,
dtype=var.dtype,
name=var.name,
)
)
if self._overwrite_variable_with_gradient(var):
self._v.append(None)
else:
self._v.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
def _rms(self, x):
return ops.sqrt(ops.mean(ops.square(x)))
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
epsilon_2 = ops.cast(self.epsilon_2, variable.dtype)
one = ops.cast(1.0, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
if not callable(self._learning_rate) and self.relative_step:
lr = ops.minimum(lr, 1 / ops.sqrt(local_step))
r = self._r[self._get_variable_index(variable)]
c = self._c[self._get_variable_index(variable)]
v = self._v[self._get_variable_index(variable)]
rho_t = ops.minimum(lr, 1 / ops.sqrt(local_step))
alpha_t = ops.maximum(epsilon_2, self._rms(variable)) * rho_t
regulated_grad_square = ops.add(ops.square(gradient), self.epsilon_1)
beta_2_t = ops.subtract(1, ops.power(local_step, self.beta_2_decay))
if len(variable.shape) >= 2:
# `r` deletes the last dimension of gradient, so it is of shape
# `gradient.shape[:-1]`.
self.assign(
r,
ops.add(
ops.multiply(beta_2_t, r),
ops.multiply(
ops.subtract(1, beta_2_t),
ops.mean(regulated_grad_square, axis=-1),
),
),
)
# `c` deletes the second last dimension of gradient, so it is of
# shape `gradient.shape[:-2] + gradient.shape[-1]`.
self.assign(
c,
ops.add(
ops.multiply(beta_2_t, c),
ops.multiply(
ops.subtract(1, beta_2_t),
ops.mean(regulated_grad_square, axis=-2),
),
),
)
self.assign(
v,
ops.multiply(
ops.expand_dims(
ops.divide(r, ops.mean(r, axis=-1, keepdims=True)),
axis=-1,
),
ops.expand_dims(c, -2),
),
)
else:
self.assign(
v,
ops.add(
ops.multiply(beta_2_t, v),
ops.multiply(
ops.subtract(1, beta_2_t), regulated_grad_square
),
),
)
u_t = ops.divide(gradient, ops.sqrt(v))
u_t_hat = ops.divide(
u_t,
ops.maximum(one, ops.divide(self._rms(u_t), self.clip_threshold)),
)
self.assign_sub(variable, ops.multiply(alpha_t, u_t_hat))
def get_config(self):
config = super().get_config()
config.update(
{
"beta_2_decay": self.beta_2_decay,
"epsilon_1": self.epsilon_1,
"epsilon_2": self.epsilon_2,
"clip_threshold": self.clip_threshold,
"relative_step": self.relative_step,
}
)
return config
Adafactor.__doc__ = Adafactor.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
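# A minimal NumPy sketch of the factored second-moment estimate used in
# `update_step` above for variables with >= 2 dimensions (illustrative
# only, not from the Keras sources). Only a row statistic `r` (mean over
# the last axis) and a column statistic `c` (mean over the second-to-last
# axis) are stored; the full matrix `v` is reconstructed on the fly:
import numpy as np

def factored_v(r, c):
    # Mirrors: expand_dims(r / mean(r, -1, keepdims=True), -1)
    #          * expand_dims(c, -2)
    return (r / r.mean(axis=-1, keepdims=True))[..., None] * c[..., None, :]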
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adamax_test.py | keras/src/optimizers/adamax_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adamax import Adamax
class AdamaxTest(testing.TestCase):
def test_config(self):
optimizer = Adamax(
learning_rate=0.5,
beta_1=0.8,
beta_2=0.95,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adamax(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adamax(
learning_rate=0.2, beta_1=0.85, beta_2=0.95, epsilon=1e-6
)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
[0.6827, 0.6873, 0.6888, 0.6896, 0.6901, 0.6904, 0.6906, 0.6908, 0.6909, 0.691],
[0.5333, 0.5407, 0.5431, 0.5444, 0.5451, 0.5456, 0.546, 0.5462, 0.5464, 0.5466],
[0.368, 0.3773, 0.3804, 0.382, 0.3829, 0.3835, 0.384, 0.3843, 0.3846, 0.3848],
[0.1933, 0.204, 0.2076, 0.2094, 0.2105, 0.2112, 0.2117, 0.2121, 0.2124, 0.2126]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adamax(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adamax(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
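# A quick check of the clipnorm golden used in these tests (illustrative,
# not part of the test suite). With clipnorm=1, the gradient [100, 100]
# (L2 norm 100 * sqrt(2)) is rescaled to unit norm:
import numpy as np
g = np.array([100.0, 100.0])
assert np.allclose(g / np.linalg.norm(g), [2**0.5 / 2, 2**0.5 / 2])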
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/rmsprop.py | keras/src/optimizers/rmsprop.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.RMSprop"])
class RMSprop(optimizer.Optimizer):
"""Optimizer that implements the RMSprop algorithm.
The gist of RMSprop is to:
- Maintain a moving (discounted) average of the square of gradients
- Divide the gradient by the root of this average
This implementation of RMSprop uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving average of the
gradients, and uses that average to estimate the variance.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
rho: float, defaults to 0.9. Discounting factor for the old gradients.
        momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks
            the momentum value, with a decay rate equal to `1 - momentum`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to 1e-7.
centered: Boolean. If `True`, gradients are normalized by the estimated
variance of the gradient; if False, by the uncentered second moment.
Setting this to `True` may help with training, but is slightly more
expensive in terms of computation and memory. Defaults to `False`.
{{base_optimizer_keyword_args}}
Example:
>>> opt = keras.optimizers.RMSprop(learning_rate=0.1)
>>> var1 = keras.backend.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1
>>> opt.minimize(loss, [var1])
>>> var1
9.683772
Reference:
- [Hinton, 2012](
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.9,
momentum=0.0,
epsilon=1e-7,
centered=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="rmsprop",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._velocities = self.add_optimizer_variables(var_list, "velocity")
self._momentums = []
if self.momentum > 0:
self._momentums = self.add_optimizer_variables(var_list, "momentum")
self._average_gradients = []
if self.centered:
self._average_gradients = self.add_optimizer_variables(
var_list, "average_gradient"
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
velocity = self._velocities[self._get_variable_index(variable)]
momentum = None
if self.momentum > 0:
momentum = self._momentums[self._get_variable_index(variable)]
average_grad = None
if self.centered:
average_grad = self._average_gradients[
self._get_variable_index(variable)
]
rho = self.rho
self.assign(
velocity,
ops.add(
ops.multiply(rho, velocity),
ops.multiply(1 - rho, ops.square(gradient)),
),
)
if self.centered:
self.assign(
average_grad,
ops.add(
ops.multiply(rho, average_grad),
ops.multiply(1 - rho, gradient),
),
)
denominator = velocity - ops.square(average_grad) + self.epsilon
else:
denominator = ops.add(velocity, self.epsilon)
increment = ops.divide(
ops.multiply(lr, gradient), ops.sqrt(denominator)
)
if self.momentum > 0:
self.assign(
momentum,
ops.add(ops.multiply(self.momentum, momentum), increment),
)
self.assign_sub(variable, momentum)
else:
self.assign_sub(variable, increment)
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"momentum": self.momentum,
"epsilon": self.epsilon,
"centered": self.centered,
}
)
return config
RMSprop.__doc__ = RMSprop.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
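# A quick check of the docstring example above (illustrative only). With
# var1 = 10.0 and gradient g = var1, a single uncentered step gives:
#   velocity = 0.9 * 0 + 0.1 * 10**2 = 10
#   step     = lr * g / sqrt(velocity + epsilon) = 0.1 * 10 / sqrt(10)
#   var1     = 10.0 - step ~= 9.683772
import math
assert abs(10.0 - 0.1 * 10.0 / math.sqrt(10.0) - 9.683772) < 1e-5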
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/muon_test.py | keras/src/optimizers/muon_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.layers import Dense
from keras.src.layers import Embedding
from keras.src.layers import Input
from keras.src.models import Sequential
from keras.src.optimizers.muon import Muon
class MuonTest(testing.TestCase):
def test_config(self):
optimizer = Muon(
learning_rate=0.5,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_Newton_Schulz(self):
optimizer = Muon()
tensor_input = ops.array([[0.2499, 0.9105], [0.2655, 0.8824]])
        expected_output = ops.array([[-0.4422, 0.6457], [0.7285, 0.2968]])
        output = optimizer.zeropower_via_newtonschulz5(tensor_input, 5)
        self.assertAllClose(
            output,
            expected_output,
rtol=1e-3,
atol=1e-3,
tpu_atol=1e-1,
tpu_rtol=1e-1,
)
def test_adamw_single_step(self):
optimizer = Muon()
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0], name="test_vars")
optimizer.build([vars])
optimizer.update_step(grads, vars, 0.5)
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_should_use_adamw(self):
vars = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer = Muon(exclude_layers=["var"])
self.assertTrue(optimizer._should_use_adamw(vars))
embedding = Embedding(2, 2)
embedding.build()
self.assertTrue(optimizer._should_use_adamw(embedding.weights[0]))
vars = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer = Muon()
self.assertFalse(optimizer._should_use_adamw(vars))
dense = Dense(2)
dense.build([None, 2])
self.assertFalse(optimizer._should_use_adamw(dense.weights[0]))
def test_muon_single_step(self):
optimizer = Muon(
learning_rate=0.5,
weight_decay=0,
)
grads = ops.array([[1.0, 6.0], [7.0, 2.0]])
vars = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer.build([vars])
optimizer.update_step(grads, vars, 0.5)
self.assertAllClose(
vars,
[[0.988775, 1.887053], [2.873428, 3.97035]],
rtol=1e-2,
atol=1e-2,
)
def test_clip_norm(self):
optimizer = Muon(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Muon(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_muon_weight_decay(self):
variable = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
weight_decay = 0.01
expected_variable = variable - variable * weight_decay
optimizer = Muon(learning_rate=1.0, weight_decay=weight_decay)
optimizer._apply_weight_decay([variable])
self.assertAllClose(variable, expected_variable, rtol=1e-4, atol=1e-4)
def test_adamw_weight_decay(self):
variable = backend.Variable(2.0)
weight_decay = 0.01
expected_variable = variable - variable * weight_decay
optimizer = Muon(learning_rate=1.0, adam_weight_decay=weight_decay)
optimizer._apply_weight_decay([variable])
self.assertAllClose(variable, expected_variable, rtol=1e-4, atol=1e-4)
def test_lr_adjust_none(self):
opt = Muon(rms_rate=None)
x = ops.ones((4, 4))
want = x
self.assertAllClose(opt.lr_adjust(x), want)
def test_lr_adjust_2d(self):
opt = Muon(rms_rate=0.2)
x = ops.ones((4, 2))
want = x * 0.2 * 2
self.assertAllClose(opt.lr_adjust(x), want)
@pytest.mark.requires_trainable_backend
def test_model_fit(self):
model = Sequential([Input((10,)), Dense(5), Dense(1, name="last")])
x = ops.ones((1, 10))
y = ops.ones((1, 1))
optimizer = Muon(learning_rate=1e-3, exclude_layers=["last"])
model.compile(optimizer=optimizer, loss="mse")
model.fit(x, y)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/base_optimizer.py | keras/src/optimizers/base_optimizer.py | import re
import warnings
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.optimizers.schedules import learning_rate_schedule
from keras.src.saving import serialization_lib
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import tracking
from keras.src.utils.naming import auto_name
class BaseOptimizer(KerasSaveable):
"""Abstract optimizer base class.
If you intend to create your own optimization algorithm, please inherit from
this class and override the following methods:
- `build`: Create your optimizer-related variables, such as momentum
variables in the SGD optimizer.
- `update_step`: Implement your optimizer's variable updating logic.
- `get_config`: serialization of the optimizer.
Example:
```python
class SGD(Optimizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.momentum = 0.9
def build(self, variables):
super().build(variables)
self.momentums = []
for variable in variables:
self.momentums.append(
self.add_variable_from_reference(
reference_variable=variable, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = self.momentums[self._get_variable_index(variable)]
self.assign(
m,
ops.subtract(
ops.multiply(m, ops.cast(self.momentum, variable.dtype)),
ops.multiply(gradient, learning_rate),
),
)
self.assign_add(variable, m)
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
```
"""
def __init__(
self,
learning_rate,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name=None,
**kwargs,
):
self._lock = False
if kwargs.pop("decay", None) is not None:
warnings.warn(
"Argument `decay` is no longer supported and will be ignored."
)
if kwargs:
raise ValueError(f"Argument(s) not recognized: {kwargs}")
if name is None:
name = auto_name(self.__class__.__name__)
self.name = name
self.weight_decay = weight_decay
self.clipnorm = clipnorm
self.global_clipnorm = global_clipnorm
self.clipvalue = clipvalue
self.use_ema = use_ema
self.loss_scale_factor = loss_scale_factor
self.gradient_accumulation_steps = gradient_accumulation_steps
if gradient_accumulation_steps:
if not gradient_accumulation_steps >= 2:
raise ValueError(
"`gradient_accumulation_steps` must be an integer >= 2. "
"Received: gradient_accumulation_steps="
f"{gradient_accumulation_steps}"
)
if use_ema:
# Verify the arguments related to EMA.
if ema_momentum > 1 or ema_momentum < 0:
raise ValueError(
"`ema_momentum` must be in the range [0, 1]. "
f"Received: ema_momentum={ema_momentum}"
)
if ema_overwrite_frequency and (
not isinstance(ema_overwrite_frequency, int)
or ema_overwrite_frequency < 1
):
raise ValueError(
"`ema_overwrite_frequency` must be an integer >= 1 or "
"None. Received: ema_overwrite_frequency="
f"{ema_overwrite_frequency}"
)
self.ema_momentum = ema_momentum
self.ema_overwrite_frequency = ema_overwrite_frequency
clip_args_sum = sum(
a is not None for a in [clipnorm, clipvalue, global_clipnorm]
)
if clip_args_sum > 1:
raise ValueError(
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can "
f"be set. Received: clipnorm={clipnorm}, "
f"clipvalue={clipvalue}, global_clipnorm={global_clipnorm}"
)
self.built = False
# Set up variable tracking.
self._variables = []
self._trainable_variables = []
self._tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
self._variables,
),
}
)
self._trainable_variables_indices = {}
# Create iteration variable
# Note: dtype="int" will resolve to int32 in JAX
# (since int64 is disallowed in JAX) and to int64 in TF.
with backend.name_scope(self.name, caller=self):
iterations = backend.Variable(
0,
name="iteration",
dtype="int",
trainable=False,
aggregation="only_first_replica",
)
self._track_variable(iterations)
self._iterations = iterations
# Create learning rate (schedule or variable)
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
elif callable(learning_rate):
self._learning_rate = learning_rate
else:
if not isinstance(learning_rate, float):
raise ValueError(
"Argument `learning_rate` should be float, or an instance "
"of LearningRateSchedule, or a callable "
"(that takes in the current iteration value "
"and returns the corresponding learning rate value). "
f"Received instead: learning_rate={learning_rate}"
)
with backend.name_scope(self.name, caller=self):
learning_rate = backend.Variable(
learning_rate,
name="learning_rate",
dtype=backend.floatx(),
trainable=False,
aggregation="only_first_replica",
)
self._track_variable(learning_rate)
self._learning_rate = learning_rate
@property
def iterations(self):
if self.gradient_accumulation_steps:
return ops.floor_divide(
self._iterations, self.gradient_accumulation_steps
)
return self._iterations
def _track_variable(self, variable):
self._tracker.add_to_store("variables", variable)
def _overwrite_variable_with_gradient(self, variable):
return getattr(variable, "overwrite_with_gradient", False)
@tracking.no_automatic_dependency_tracking
def build(self, variables):
if self.use_ema:
self._model_variables_moving_average = self.add_optimizer_variables(
variables, "average"
)
if self.gradient_accumulation_steps:
self._accumulated_gradients = []
for i, variable in enumerate(variables):
self._trainable_variables_indices[self._var_key(variable)] = i
if self.gradient_accumulation_steps:
self._accumulated_gradients.append(
self.add_variable_from_reference(
variable,
name="gradient_accumulator",
)
)
self._trainable_variables = variables[:]
self.built = True
def _var_key(self, variable):
# Helper function to get a stable ID and the variable instance mapping.
return id(variable)
@property
def variables(self):
return self._variables[:]
def _get_variable_index(self, variable):
return self._trainable_variables_indices[self._var_key(variable)]
def add_variable(
self,
shape,
initializer="zeros",
dtype=None,
aggregation="none",
layout=None,
name=None,
):
"""Add a variable to the optimizer.
Args:
shape: Shape tuple for the variable. Must be fully-defined
(no `None` entries).
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). Defaults to `"zeros"`.
dtype: Dtype of the variable to create, e.g. `"float32"`. If
unspecified, defaults to the `keras.backend.floatx()`.
aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
`"sum"` or `"only_first_replica"`. Annotates the variable with
the type of multi-replica aggregation to be used for this
variable when writing custom data parallel training loops.
Defaults to `"none"`.
layout: Optional tensor layout. Defaults to `None`.
name: String name of the variable. Useful for debugging purposes.
Returns:
An optimizer variable, in the format of `keras.Variable`.
"""
self._check_super_called()
initializer = initializers.get(initializer)
with backend.name_scope(self.name, caller=self):
variable = backend.Variable(
initializer=initializer,
shape=shape,
dtype=dtype,
trainable=False,
aggregation=aggregation,
layout=layout,
name=name,
)
self._track_variable(variable)
return variable
def add_variable_from_reference(
self, reference_variable, name=None, initializer="zeros"
):
"""Add an optimizer variable from the model variable.
        Create an optimizer variable based on the information of a model
        variable. For example, in SGD with momentum, a corresponding momentum
        variable of the same shape and dtype is created for each model
        variable.
Args:
reference_variable: `keras.Variable`. The corresponding model
variable to the optimizer variable to be created.
name: Optional string. The name prefix of the optimizer variable to
be created. If not provided, it will be set to `"var"`. The
                variable name will follow the pattern
                `{reference_variable.name}_{name}`,
                e.g., `dense_1_kernel_momentum`. Defaults to `None`.
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). If unspecified, defaults to
`"zeros"`.
Returns:
An optimizer variable, in the format of `keras.Variable`.
"""
name = name or "var"
if hasattr(reference_variable, "path"):
name = f"{reference_variable.path.replace('/', '_')}_{name}"
else:
sanitised_ref_name = (
str(reference_variable.name).replace("/", "_").replace(":", "_")
)
name = f"{sanitised_ref_name}_{name}"
return self.add_variable(
shape=reference_variable.shape,
initializer=initializer,
dtype=reference_variable.dtype,
name=name,
layout=getattr(reference_variable, "_layout", None),
)
def add_optimizer_variables(
self, trainable_variables, name, initializer="zeros"
):
"""Add optimizer variables from the list of trainable model variables.
Create an optimizer variable based on the information of the supplied
        model variables. For example, in SGD with momentum, a corresponding
        momentum variable of the same shape and dtype is created for each
        model variable.
        Note that for trainable variables with
        `v.overwrite_with_gradient == True`, `None` is inserted into the
        output list instead, since the optimizer variable would never be
        used and creating it would be wasteful.
Args:
            trainable_variables: list of `keras.Variable`s. The model
                variables for which to create corresponding optimizer
                variables.
name: The name prefix(es) of the optimizer variable(s) to be
created. Can be a single string or list of strings. If a
list of strings, will create an optimizer variable for each
prefix. The variable name will follow the pattern
                `{trainable_variable.name}_{name}`, e.g.,
                `dense_1_kernel_momentum`.
initializer: Initializer object(s) to use to populate the initial
variable value(s), or string name of a built-in initializer
(e.g. `"random_normal"`). If unspecified, defaults to
`"zeros"`.
Returns:
A list of optimizer variables, in the format of `keras.Variable`s.
            If multiple names are provided, returns a tuple of lists.
"""
name_list = name
initializer_list = initializer
if isinstance(name, str):
# Single name/initializer.
name_list = [name]
initializer_list = [initializer]
else:
# Multiple names/initializers.
# If there is only one initializer, use it for all names.
if isinstance(initializer, str) or isinstance(
initializer, initializers.Initializer
):
initializer_list = [initializer] * len(name_list)
if len(name_list) != len(initializer_list):
raise ValueError(
f"The number of provided names must match the number of "
f"provided initializers. Received name='{name}', "
f"initializer='{initializer}'"
)
# Build up lists of optimizer variables.
optimizer_variables = tuple([] for _ in name_list)
for variable in trainable_variables:
# Interleaves adding variables for backward-compatibility.
if not self._overwrite_variable_with_gradient(variable):
for i, (var_name, var_init) in enumerate(
zip(name_list, initializer_list)
):
optimizer_variables[i].append(
self.add_variable_from_reference(
variable,
name=var_name,
initializer=var_init,
)
)
else:
for i in range(len(name_list)):
optimizer_variables[i].append(None)
# If single input name, return the single list.
if isinstance(name, str):
return optimizer_variables[0]
return optimizer_variables
def _check_variables_are_known(self, variables):
for v in variables:
if self._var_key(v) not in self._trainable_variables_indices:
raise ValueError(
f"Unknown variable: {v}. This optimizer can only "
"be called for the variables it was originally built with. "
"When working with a new set of variables, you should "
"recreate a new optimizer instance."
)
def assign(self, variable, value):
"""Assign a value to a variable.
This should be used in optimizers instead of `variable.assign(value)` to
support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
            value: The value to assign to the variable.
"""
variable.assign(value)
def assign_add(self, variable, value):
"""Add a value to a variable.
This should be used in optimizers instead of
`variable.assign_add(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
value: The value to add to the variable.
"""
variable.assign_add(value)
def assign_sub(self, variable, value):
"""Subtract a value from a variable.
This should be used in optimizers instead of
`variable.assign_sub(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
            value: The value to subtract from the variable.
"""
variable.assign_sub(value)
def update_step(self, gradient, variable, learning_rate):
raise NotImplementedError
def apply_gradients(self, grads_and_vars):
grads, trainable_variables = zip(*grads_and_vars)
self.apply(grads, trainable_variables)
# Return iterations for compat with tf.keras.
return self._iterations
def apply(self, grads, trainable_variables=None):
"""Update traininable variables according to provided gradient values.
`grads` should be a list of gradient tensors
with 1:1 mapping to the list of variables the optimizer was built with.
`trainable_variables` can be provided
on the first call to build the optimizer.
"""
if len(grads) == 0:
# It is possible that the grad is empty. In this case,
# `apply_gradients` is a no-op.
return
if trainable_variables is None:
if not self.built:
raise ValueError(
"When passing `grads` without `variables`, the optimizer "
"must already be built on a list of variables. "
"Call `optimizer.build(trainable_variables)` first. "
)
if len(grads) != len(self._trainable_variables_indices):
raise ValueError(
"When passing `grads` as a list of gradient tensors, the "
f"gradients must match `optimizer.variables` one-to-on. "
f"Received a list of {len(grads)} gradients, but the "
f"optimizer is tracking {len(self._trainable_variables)} "
"trainable variables."
)
trainable_variables = self._trainable_variables
else:
trainable_variables = list(trainable_variables)
# Optionally build optimizer.
if not self.built:
with backend.name_scope(self.name, caller=self):
self.build(trainable_variables)
self.built = True
self._check_variables_are_known(trainable_variables)
with backend.name_scope(self.name, caller=self):
# Filter empty gradients.
grads, trainable_variables = self._filter_empty_gradients(
grads, trainable_variables
)
# Overwrite targeted variables directly with their gradients if
# their `overwrite_with_gradient` is set.
grads, trainable_variables = (
self._overwrite_variables_directly_with_gradients(
grads, trainable_variables
)
)
if len(list(grads)) > 0:
# Unscale gradients.
scale = self.loss_scale_factor
if scale is not None:
grads = [g if g is None else g / scale for g in grads]
# Apply gradient updates.
self._backend_apply_gradients(grads, trainable_variables)
# Apply variable constraints after applying gradients.
for variable in trainable_variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
# Update iteration counter.
self._iterations.assign_add(1)
def _backend_apply_gradients(self, grads, trainable_variables):
"""Apply method that can be overridden by different backends.
JAX overrides it in order to deal with statelessness in gradient
accumulation and EMA handling.
The below implementation is intended to be generally backend-agnostic,
but may not work with all backends.
This method does 4 things:
- Call the optimizer's update_step() to update trainable variables
and optimizer variables.
- Update EMA variables, if EMA is configured.
- Update gradient accumulators, if gradient accumulation is configured.
- Update the iteration counter.
"""
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
def _update_step_fn(grads, trainable_variables):
# Run update step with accumulated grads + reset accumulators
steps = self.gradient_accumulation_steps
grads = [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
]
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
self._backend_reset_gradient_accumulators()
ops.cond(
is_update_step,
lambda: _update_step_fn(grads, trainable_variables),
lambda: self._backend_increment_gradient_accumulators(
grads, acc_grads
),
)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
# Run update step.
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency:
# Only when self.ema_overwrite_frequency is not None, we
# overwrite the model variables.
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
ops.cond(
should_overwrite_model_vars,
lambda: self._overwrite_model_variables_with_average_value(
self._trainable_variables
),
lambda: None,
)
def _backend_update_step(self, grads, trainable_variables, learning_rate):
"""Collective update_step that can be overridden by the backend.
It is overridden by torch for performance reasons, and
by TF to support tf.distribute.
"""
for grad, var in zip(grads, trainable_variables):
self.update_step(grad, var, learning_rate)
def _backend_reset_gradient_accumulators(self):
for g_acc in self._accumulated_gradients:
if g_acc is not None:
g_acc.assign(ops.zeros(g_acc.shape, dtype=g_acc.dtype))
def _backend_increment_gradient_accumulators(self, grads, acc_grads):
new_g_accs = [(g + acc_g) for g, acc_g in zip(grads, acc_grads)]
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
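    # A minimal numeric sketch (plain Python, illustrative values only) of
    # the accumulation cadence implemented above, assuming
    # `gradient_accumulation_steps=4`: the first three micro-batch gradients
    # are added into the accumulator, and on the fourth step
    # `(g + acc) / steps` recovers their mean before the real update runs
    # and the accumulator is reset to zeros.
    #
    #   steps = 4
    #   micro_grads = [1.0, 2.0, 3.0, 4.0]
    #   acc = 0.0
    #   for g in micro_grads[:-1]:
    #       acc += g  # _backend_increment_gradient_accumulators
    #   applied = (micro_grads[-1] + acc) / steps
    #   assert applied == 2.5  # the mean of the four micro-gradients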
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
"""Stateless version of `apply` that returns modified variables.
Args:
optimizer_variables: list of tensors containing the current values
for the optimizer variables. These are native tensors and not
`keras.Variable`s.
grads: list of gradients to apply.
trainable_variables: list of tensors containing the current values
for the model variables. These are native tensors and not
`keras.Variable`s.
        Returns: A tuple containing two lists of tensors, the updated
            `trainable_variables` and the updated `optimizer_variables`.
"""
self._check_super_called()
if not self.built:
raise ValueError(
f"To call `stateless_apply`, {self.__class__.__name__} "
"must be built (i.e. its variables must have been created). "
"You can build it via `optimizer.build(trainable_variables)`."
)
if len(optimizer_variables) != len(self.variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
f"corresponding 1:1 to {self.__class__.__name__}().variables. "
f"Received list with length {len(optimizer_variables)}, but "
f"expected {len(self.variables)} variables."
)
if len(trainable_variables) != len(self._trainable_variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
"corresponding 1:1 to the trainable variables list that "
"the optimizer was built with. Received "
f"len(trainable_variables) == {len(trainable_variables)} "
"whereas the optimizer was built with "
f"{len(self._trainable_variables)} variables."
)
# Gather variable mapping
mapping = list(
zip(self._trainable_variables, trainable_variables)
) + list(zip(self.variables, optimizer_variables))
# Call in stateless scope
with backend.StatelessScope(state_mapping=mapping) as scope:
self.apply(grads)
# Gather updated variables
trainable_variables = []
for v in self._trainable_variables:
new_v = scope.get_current_value(v)
if new_v is not None:
trainable_variables.append(new_v)
else:
trainable_variables.append(v)
optimizer_variables = []
for v in self.variables:
new_v = scope.get_current_value(v)
if new_v is not None:
optimizer_variables.append(new_v)
else:
optimizer_variables.append(v)
return trainable_variables, optimizer_variables
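    # A minimal usage sketch of the stateless pattern above (names such as
    # `model` and `grads` are hypothetical): nothing is mutated in place,
    # the updated values are simply returned for the caller to thread
    # through, as in JAX-style training loops.
    #
    #   new_tvs, new_ovs = optimizer.stateless_apply(
    #       [v.value for v in optimizer.variables],
    #       grads,
    #       [v.value for v in model.trainable_variables],
    #   )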
def scale_loss(self, loss):
"""Scale the loss before computing gradients.
Scales the loss before gradients are computed in a `train_step`. This
is primarily useful during mixed precision training to prevent numeric
underflow.
"""
if self.loss_scale_factor is not None:
return loss * self.loss_scale_factor
return loss
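    # A round-trip sketch of loss scaling with a hypothetical scale factor:
    # scaling the loss scales every intermediate gradient by the same
    # factor (keeping small float16 gradients representable), and dividing
    # by the factor in `apply` leaves the final update unchanged.
    #
    #   scale = 2.0**15
    #   scaled_loss = loss * scale       # scale_loss()
    #   scaled_grad = scale * true_grad  # what backprop produces
    #   grad = scaled_grad / scale       # unscaling in apply()
    #   assert grad == true_grad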
@property
def learning_rate(self):
return self._get_current_learning_rate()
@learning_rate.setter
def learning_rate(self, learning_rate):
if isinstance(self._learning_rate, backend.Variable):
prev_lr_var = self._learning_rate
else:
prev_lr_var = None
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
elif callable(learning_rate):
self._learning_rate = learning_rate
else:
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
raise TypeError(
"This optimizer was created with a `LearningRateSchedule`"
" object as its `learning_rate` constructor argument, "
"hence its learning rate is not settable. If you need the"
" learning rate to be settable, you should instantiate "
"the optimizer with a float `learning_rate` argument."
)
self._learning_rate.assign(learning_rate)
if prev_lr_var is not None and not isinstance(
self._learning_rate, backend.Variable
):
# Untrack learning rate variable
self._untrack_variable(prev_lr_var)
def set_weights(self, weights):
"""Set the weights of the optimizer."""
if not self.built:
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
"optimizer weights before calling `set_weights()`."
)
for variable, weight in zip(self._variables, weights):
if variable.shape != weight.shape:
raise ValueError(
f"Optimizer variable {self._var_key(variable)} has shape "
f"{str(variable.shape)} not compatible with provided "
f"weight shape {str(weight.shape)}."
)
variable.assign(weight)
def save_own_variables(self, store):
"""Get the state of this optimizer object."""
for i, variable in enumerate(self.variables):
store[str(i)] = variable.numpy()
def load_own_variables(self, store):
"""Set the state of this optimizer object."""
if len(store.keys()) != len(self.variables):
msg = (
f"Skipping variable loading for optimizer '{self.name}', "
f"because it has {len(self.variables)} variables whereas "
f"the saved optimizer has {len(store.keys())} variables. "
)
if len(self.variables) == 0:
msg += (
"This is likely because the optimizer has not been "
"called/built yet."
)
warnings.warn(msg, stacklevel=2)
return
for i, variable in enumerate(self.variables):
variable.assign(store[str(i)])
def _get_current_learning_rate(self):
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
return self._learning_rate(self._iterations)
elif isinstance(self._learning_rate, backend.Variable):
return self._learning_rate
elif callable(self._learning_rate):
return self._learning_rate()
return self._learning_rate
def _overwrite_variables_directly_with_gradients(self, grads, vars):
"""Overwrite the variables directly by their gradients.
This method is designed for a special case where we want to overwrite
the variable directly with its computed gradient. For example, in float8
training, new `scale` and `amax_history` are computed as gradients, and
we want to overwrite them directly instead of following the typical
        procedure of gradient descent with a learning rate, gradient
        clipping, and weight decay.
After the update, the processed pairs will be filtered out.
"""
# Shortcut for `tf.Variable` because it doesn't have a
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/adagrad_test.py | keras/src/optimizers/adagrad_test.py | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adagrad import Adagrad
class AdagradTest(testing.TestCase):
def test_config(self):
optimizer = Adagrad(
learning_rate=0.5,
initial_accumulator_value=0.2,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adagrad(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.5233, 1.5007, 2.5005, 3.5061], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adagrad(
learning_rate=0.2, initial_accumulator_value=0.3, epsilon=1e-6
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963],
[0.9604, 0.9278, 0.9003, 0.8784, 0.8615, 0.8487, 0.8388, 0.8313, 0.8255, 0.8209],
[0.9251, 0.8629, 0.8137, 0.7768, 0.7497, 0.7298, 0.7151, 0.704, 0.6956, 0.6891],
[0.8903, 0.8012, 0.7342, 0.6862, 0.6521, 0.6277, 0.6099, 0.5967, 0.5867, 0.579],
[0.856, 0.7422, 0.6604, 0.6037, 0.5644, 0.5367, 0.5168, 0.5021, 0.491, 0.4825]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adagrad(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adagrad(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/lamb.py | keras/src/optimizers/lamb.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.Lamb")
class Lamb(optimizer.Optimizer):
"""Optimizer that implements the Lamb algorithm.
    Lamb is a stochastic gradient descent method that
    uses layer-wise adaptive moments to adjust the
    learning rate for each parameter, based on the ratio of the
    norm of the weight to the norm of the gradient update.
    This helps to stabilize the training process and improves convergence,
    especially for large batch sizes.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability.
Defaults to `1e-7`.
{{base_optimizer_keyword_args}}
References:
    - [You et al.](https://arxiv.org/pdf/1904.00962)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="lamb",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
        Lamb optimizer has 2 types of variables: momentums and velocities.
Args:
var_list: list of model variables to build Lamb variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums, self._velocities = self.add_optimizer_variables(
var_list, ["momentum", "velocity"]
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
beta_2_power = ops.power(
ops.cast(self.beta_2, variable.dtype), local_step
)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)
)
self.assign_add(
v,
ops.multiply(
ops.subtract(ops.square(gradient), v), 1 - self.beta_2
),
)
m_t_hat = ops.divide(m, (1.0 - beta_1_power))
v_sqrt = ops.add(
ops.sqrt(ops.divide(v, (1.0 - beta_2_power))), self.epsilon
)
update = ops.divide(m_t_hat, v_sqrt)
w_norm = ops.sqrt(ops.sum(ops.power(variable, 2)))
g_norm = ops.sqrt(ops.sum(ops.power(update, 2)))
# ratio = w_norm / g_norm if w_norm > 0 and g_norm > 0 else 1
ratio = ops.where(
ops.greater(w_norm, 0),
ops.where(ops.greater(g_norm, 0), (w_norm / g_norm), 1.0),
1.0,
)
self.assign_sub(variable, ratio * lr * update)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Lamb.__doc__ = Lamb.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
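# A minimal NumPy sketch (not part of the class above; all values
# illustrative) of a single LAMB step as described in the docstring:
# bias-corrected Adam-style moments produce an `update`, which is then
# rescaled by the layer-wise trust ratio ||w|| / ||update||.
import numpy as np

def lamb_step_sketch(w, g, m, v, t, lr=0.001, b1=0.9, b2=0.999, eps=1e-7):
    m = b1 * m + (1 - b1) * g  # first moment estimate
    v = b2 * v + (1 - b2) * g * g  # second moment estimate
    m_hat = m / (1 - b1**t)  # bias correction
    v_hat = v / (1 - b2**t)
    update = m_hat / (np.sqrt(v_hat) + eps)
    w_norm = np.linalg.norm(w)
    u_norm = np.linalg.norm(update)
    ratio = w_norm / u_norm if w_norm > 0 and u_norm > 0 else 1.0
    return w - ratio * lr * update, m, v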
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/loss_scale_optimizer.py | keras/src/optimizers/loss_scale_optimizer.py | from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
from keras.src.saving import serialization_lib
from keras.src.utils import tracking
@keras_export(
[
"keras.optimizers.LossScaleOptimizer",
"keras.mixed_precision.LossScaleOptimizer",
]
)
class LossScaleOptimizer(optimizer.Optimizer):
"""An optimizer that dynamically scales the loss to prevent underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
intermediate gradients to be scaled by the loss scale as well. The final
gradients are divided (or "unscaled") by the loss scale to bring them back
to their original value.
`LossScaleOptimizer` wraps another optimizer and applies dynamic loss
scaling to it. This loss scale is dynamically updated over time as follows:
- On any train step, if a nonfinite gradient is encountered, the loss scale
is halved, and the train step is skipped.
    - If `dynamic_growth_steps` steps have occurred since the last time the
      loss scale was updated, and no nonfinite gradients have occurred, the
      loss scale is doubled.
Args:
inner_optimizer: The `keras.optimizers.Optimizer` instance to wrap.
initial_scale: Float. The initial loss scale. This scale will be updated
during training. It is recommended for this to be a very high
number, because a loss scale that is too high gets lowered far more
quickly than a loss scale that is too low gets raised.
dynamic_growth_steps: Int. How often to update the scale upwards. After
every `dynamic_growth_steps` steps with finite gradients, the
loss scale is doubled.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
inner_optimizer,
initial_scale=2.0**15,
dynamic_growth_steps=2000,
name=None,
**kwargs,
):
if not kwargs.pop("dynamic", True):
raise ValueError(
"LossScaleOptimizer no longer supports `dynamic=False`. "
"Instead, simply set `loss_scale_factor` directly on the "
"`inner_optimizer`."
)
# Backwards compatibility code for deserialization.
        # LossScaleOptimizer used to return all these parameters in `get_config`
        # via `super().get_config()` even though they are all non-functional. We
        # no longer let the user set them, but we have to allow the default values
# to be passed during deserialization to support older models.
base_optimizer_defaults = {
"weight_decay": None,
"clipnorm": None,
"global_clipnorm": None,
"clipvalue": None,
"use_ema": False,
"ema_momentum": 0.99,
"ema_overwrite_frequency": None,
"loss_scale_factor": None,
"gradient_accumulation_steps": None,
}
for arg_name, default_value in base_optimizer_defaults.items():
if arg_name not in kwargs:
continue
arg_value = kwargs.pop(arg_name)
if (
default_value is None and arg_value is not None
) or arg_value != default_value:
raise ValueError(
f"LossScaleOptimizer does not support `{arg_name}`. "
f"Instead, set `{arg_name}` on the `inner_optimizer`."
)
if kwargs:
raise ValueError(
"LossScaleOptimizer does not support arguments: "
f"`{'`, `'.join(kwargs.keys())}`."
)
super().__init__(learning_rate=0.0, name=name)
self.inner_optimizer = inner_optimizer
self.initial_scale = initial_scale
self.dynamic_growth_steps = dynamic_growth_steps
# Disable the inner optimizer's loss scaling, otherwise
# gradients will be scaled twice.
self.inner_optimizer.loss_scale_factor = None
@tracking.no_automatic_dependency_tracking
def build(self, var_list):
self.step_counter = self.add_variable(
shape=(),
dtype="int",
initializer=initializers.Zeros(),
aggregation="none",
name="step_counter",
)
self.dynamic_scale = self.add_variable(
shape=(),
dtype="float32",
initializer=initializers.Constant(self.initial_scale),
aggregation="none",
name="dynamic_scale",
)
self.inner_optimizer.build(var_list)
super().build(var_list)
@property
def variables(self):
return self._variables + self.inner_optimizer.variables
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
if not self.built:
raise ValueError(
f"To call `stateless_apply`, {self.__class__.__name__} "
"must be built (i.e. its variables must have been created). "
"You can build it via `optimizer.build(trainable_variables)`."
)
finite = self.check_finite(grads)
return ops.cond(
finite,
lambda: self._stateless_handle_finite_grads(
optimizer_variables, grads, trainable_variables
),
lambda: self._stateless_handle_non_finite_grads(
optimizer_variables, trainable_variables
),
)
def _stateless_handle_finite_grads(
self, optimizer_variables, grads, trainable_variables
):
def upscale():
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign(0)
self.dynamic_scale.assign(ops.multiply(self.dynamic_scale, 2.0))
return [scope.get_current_value(v) for v in self._variables]
def increment():
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign_add(1)
return [scope.get_current_value(v) for v in self._variables]
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping):
# Potentially upscale loss and reset counter.
own_variables = ops.cond(
ops.equal(self.step_counter, self.dynamic_growth_steps - 1),
upscale,
increment,
)
# Unscale gradients.
scale = self.dynamic_scale
unscaled_grads = [
g
if g is None or self._overwrite_variable_with_gradient(v)
else ops.divide(g, scale)
for g, v in zip(grads, self._trainable_variables)
]
(
new_trainable_variables,
new_inner_variables,
) = self.inner_optimizer.stateless_apply(
self.inner_optimizer.variables,
unscaled_grads,
trainable_variables,
)
new_optimizer_variables = own_variables + new_inner_variables
return new_trainable_variables, new_optimizer_variables
def _stateless_handle_non_finite_grads(
self, optimizer_variables, trainable_variables
):
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign(0)
self.dynamic_scale.assign(ops.multiply(self.dynamic_scale, 0.5))
new_optimizer_variables = []
for v in self.variables:
new_optimizer_variables.append(scope.get_current_value(v))
return trainable_variables, new_optimizer_variables
def apply(self, grads, trainable_variables=None):
# Optionally build optimizer.
if not self.built:
with backend.name_scope(self.name, caller=self):
self.build(trainable_variables)
self.built = True
if backend.backend() == "tensorflow":
self._tf_apply(grads, trainable_variables)
else:
self._common_apply(grads, trainable_variables)
def _stateful_handle_finite_grads(self, grads, trainable_variables):
scale = self.dynamic_scale
# Unscale gradients.
tvs = trainable_variables or self._trainable_variables
unscaled_grads = [
g
if g is None or self._overwrite_variable_with_gradient(v)
else ops.divide(g, scale)
for g, v in zip(grads, tvs)
]
self.inner_optimizer.apply(
unscaled_grads, trainable_variables=trainable_variables
)
def upscale():
self.step_counter.assign(0)
self.dynamic_scale.assign(ops.multiply(self.dynamic_scale, 2.0))
def increment():
self.step_counter.assign_add(1)
# Potentially upscale loss and reset counter.
ops.cond(
ops.equal(self.step_counter, self.dynamic_growth_steps - 1),
upscale,
increment,
)
def _stateful_handle_non_finite_grads(self):
# If any inf or nan in grads, downscale loss and reset counter.
self.step_counter.assign(0)
self.dynamic_scale.assign(ops.multiply(self.dynamic_scale, 0.5))
def _common_apply(self, grads, trainable_variables=None):
finite = self.check_finite(grads)
ops.cond(
finite,
lambda: self._stateful_handle_finite_grads(
grads, trainable_variables
),
self._stateful_handle_non_finite_grads,
)
def _tf_apply(self, grads, trainable_variables=None):
"""Tensorflow specific logic for apply, which handles distribution."""
from keras.src.utils.module_utils import tensorflow as tf
if tf.distribute.in_cross_replica_context():
raise ValueError("apply() must be called in a replica context.")
if tf.__internal__.distribute.strategy_supports_no_merge_call():
self._common_apply(grads, trainable_variables=trainable_variables)
else:
def _handle_cross_replica(distribution, grads, trainable_variables):
finite_per_replica = (
distribution.extended.call_for_each_replica(
self.check_finite, args=(grads,)
)
)
# Each replica computed the same `finite` value, since
# `grads` is all-reduced across replicas. Arbitrarily take
# `finite` from the first replica.
finite = distribution.experimental_local_results(
finite_per_replica
)[0]
def apply_fn():
distribution.extended.call_for_each_replica(
self._stateful_handle_finite_grads,
args=(grads, trainable_variables),
)
# Note: We must call this cond() in a cross-replica context.
# DistributionStrategy does not support having a cond in a
# replica context with a branch that calls `merge_call`, and
# self._optimizer.apply_gradients calls `merge_call`.
ops.cond(
finite, apply_fn, self._stateful_handle_non_finite_grads
)
tf.distribute.get_replica_context().merge_call(
_handle_cross_replica, args=(grads, trainable_variables)
)
def check_finite(self, grads):
tensor_grads = [g for g in grads if g is not None]
finite_grads = [ops.all(ops.isfinite(g)) for g in tensor_grads]
return ops.all(ops.convert_to_tensor(finite_grads))
@property
def learning_rate(self):
return self.inner_optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, learning_rate):
self.inner_optimizer.learning_rate = learning_rate
@property
def iterations(self):
return self.inner_optimizer.iterations
def scale_loss(self, loss):
scale = self.dynamic_scale if self.built else self.initial_scale
return ops.multiply(loss, scale)
def finalize_variable_values(self, var_list):
self.inner_optimizer.finalize_variable_values(var_list)
def get_config(self):
# Do not use super().get_config() as only "name" is supported.
inner_optimizer_config = serialization_lib.serialize_keras_object(
self.inner_optimizer
)
return {
"name": self.name,
"inner_optimizer": inner_optimizer_config,
"initial_scale": self.initial_scale,
"dynamic_growth_steps": self.dynamic_growth_steps,
}
@classmethod
def from_config(cls, config, custom_objects=None):
inner_optimizer = serialization_lib.deserialize_keras_object(
config.pop("inner_optimizer"),
custom_objects=custom_objects,
)
return cls(inner_optimizer, **config)
LossScaleOptimizer.__doc__ = LossScaleOptimizer.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
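# A pure-Python sketch of the dynamic scale schedule implemented above
# (hypothetical inputs): the scale is halved and the counter reset whenever
# a step sees nonfinite gradients, and doubled after `dynamic_growth_steps`
# consecutive finite steps.
def update_scale_sketch(scale, counter, finite, dynamic_growth_steps=2000):
    if not finite:
        return scale * 0.5, 0  # skip the step and downscale
    if counter == dynamic_growth_steps - 1:
        return scale * 2.0, 0  # upscale and reset the counter
    return scale, counter + 1  # keep the scale, count the finite step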
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/schedules/learning_rate_schedule.py | keras/src/optimizers/schedules/learning_rate_schedule.py | """Various learning rate schedule functions."""
import math
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.saving import serialization_lib
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule:
"""The learning rate schedule base class.
You can use a learning rate schedule to modulate how the learning rate
of your optimizer changes over time.
Several built-in learning rate schedules are available, such as
`keras.optimizers.schedules.ExponentialDecay` or
`keras.optimizers.schedules.PiecewiseConstantDecay`:
```python
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=10000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
```
A `LearningRateSchedule` instance can be passed in as the `learning_rate`
argument of any optimizer.
To implement your own schedule object, you should implement the `__call__`
method, which takes a `step` argument (scalar integer tensor, the
current training step count).
Like for any other Keras object, you can also optionally
make your object serializable by implementing the `get_config`
and `from_config` methods.
Example:
```python
class MyLRSchedule(keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate):
self.initial_learning_rate = initial_learning_rate
def __call__(self, step):
return self.initial_learning_rate / (step + 1)
optimizer = keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1))
```
"""
def __call__(self, step):
raise NotImplementedError(
f"Learning rate schedule '{self.__class__.__name__}' "
"must override `__call__(self, step)`."
)
def get_config(self):
raise NotImplementedError(
f"Learning rate schedule '{self.__class__.__name__}' "
"must override `get_config()` in order to be serializable."
)
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A `LearningRateSchedule` that uses an exponential decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A Python float. The initial learning rate.
decay_steps: A Python integer. Must be positive. See the decay
computation above.
decay_rate: A Python float. The decay rate.
staircase: Boolean. If `True` decay the learning rate at discrete
intervals.
name: String. Optional name of the operation. Defaults to
`"ExponentialDecay`".
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name="ExponentialDecay",
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
if self.decay_steps <= 0:
raise ValueError(
"Argument `decay_steps` must be > 0. "
f"Received: decay_steps={self.decay_steps}"
)
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
decay_steps = ops.cast(self.decay_steps, dtype)
decay_rate = ops.cast(self.decay_rate, dtype)
global_step_recomp = ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = ops.floor(p)
return ops.multiply(initial_learning_rate, ops.power(decay_rate, p))
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name,
}
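# A quick numeric sketch of the formula above, with illustrative values:
# with `staircase=False` the exponent `step / decay_steps` is continuous,
# with `staircase=True` it is floored to a whole number of decay periods.
_initial_lr, _decay_steps, _decay_rate, _step = 0.1, 10, 0.96, 5
_continuous_lr = _initial_lr * _decay_rate ** (_step / _decay_steps)  # ~0.098
_staircase_lr = _initial_lr * _decay_rate ** (_step // _decay_steps)  # 0.1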
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A `LearningRateSchedule` that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = ops.array(0)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
boundaries: A list of Python numbers with strictly increasing
entries, and with all elements having the same type as the
optimizer step.
values: A list of Python numbers that specifies the values for the
intervals defined by `boundaries`. It should have one more
element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
`"PiecewiseConstant"`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`,
..., and `values[-1]` when `step > boundaries[-1]`.
Raises:
ValueError: if the number of elements in the `boundaries` and `values`
lists do not match.
"""
def __init__(self, boundaries, values, name="PiecewiseConstant"):
super().__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of "
f"values. Received: boundaries={boundaries} of length "
f"{len(boundaries)}, and values={values} "
f"of length {len(values)}."
)
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with ops.name_scope(self.name):
boundaries = [ops.convert_to_tensor(x) for x in self.boundaries]
values = [ops.convert_to_tensor(x) for x in self.values]
step = ops.convert_to_tensor(step)
for i, b in enumerate(boundaries):
if b.dtype != step.dtype:
# We cast the boundaries to have the same type as the step
b = ops.cast(b, step.dtype)
boundaries[i] = b
result_dtype = values[0].dtype
result_value = ops.array(0, dtype=result_dtype)
# For each range between boundaries, we check whether the step is
# within that range, cast the resulting boolean to a number,
# and multiply the result by the corresponding value for the range.
# Taking the sum of these yields a piecewise constant function.
step_less_than_first_boundary = ops.cast(
step <= boundaries[0], result_dtype
)
result_value += step_less_than_first_boundary * values[0]
step_greater_than_last_boundary = ops.cast(
step > boundaries[-1], result_dtype
)
result_value += step_greater_than_last_boundary * values[-1]
for low, high, value in zip(
boundaries[:-1], boundaries[1:], values[1:-1]
):
step_in_range = ops.cast(
(step > low) & (step <= high), result_dtype
)
result_value += step_in_range * value
return result_value
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name,
}
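# A plain-Python sketch of the boundary semantics documented above, with
# illustrative numbers: `values[0]` while `step <= boundaries[0]`, then each
# subsequent value, ending with `values[-1]` once `step > boundaries[-1]`.
def _piecewise_lr_sketch(step, boundaries, values):
    for boundary, value in zip(boundaries, values):
        if step <= boundary:
            return value
    return values[-1]

assert _piecewise_lr_sketch(100000, [100000, 110000], [1.0, 0.5, 0.1]) == 1.0
assert _piecewise_lr_sketch(105000, [100000, 110000], [1.0, 0.5, 0.1]) == 0.5
assert _piecewise_lr_sketch(120000, [100000, 110000], [1.0, 0.5, 0.1]) == 0.1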
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A `LearningRateSchedule` that uses a polynomial decay schedule.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a backend variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
    If `cycle` is `True`, then a multiple of `decay_steps` is used: the first one
that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A Python float. The initial learning rate.
decay_steps: A Python integer. Must be positive. See the decay
computation above.
end_learning_rate: A Python float. The minimal end learning rate.
power: A Python float. The power of the polynomial. Defaults to
`1.0`.
cycle: A boolean, whether it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
`"PolynomialDecay"`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name="PolynomialDecay",
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
if self.decay_steps <= 0:
raise ValueError(
"Argument `decay_steps` must be > 0. "
f"Received: decay_steps={self.decay_steps}"
)
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
end_learning_rate = ops.cast(self.end_learning_rate, dtype)
power = ops.cast(self.power, dtype)
global_step_recomp = ops.cast(step, dtype)
decay_steps_recomp = ops.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = ops.where(
ops.equal(global_step_recomp, 0),
1.0,
ops.ceil(global_step_recomp / self.decay_steps),
)
decay_steps_recomp = ops.multiply(
decay_steps_recomp, multiplier
)
else:
# Make sure that the global_step used is not bigger than
# decay_steps.
global_step_recomp = ops.minimum(
global_step_recomp, decay_steps_recomp
)
p = ops.divide(global_step_recomp, decay_steps_recomp)
return ops.add(
ops.multiply(
initial_learning_rate - end_learning_rate,
ops.power(1 - p, power),
),
end_learning_rate,
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name,
}
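# A numeric sketch of the decay computation above, using the docstring's
# sqrt example (0.1 -> 0.01 over 10000 steps, power=0.5); `cycle=True`
# would instead grow `decay_steps` to the next multiple above `step`.
def _polynomial_lr_sketch(
    step, initial_lr=0.1, end_lr=0.01, decay_steps=10000, power=0.5
):
    step = min(step, decay_steps)
    fraction = step / decay_steps
    return (initial_lr - end_lr) * (1 - fraction) ** power + end_lr

assert abs(_polynomial_lr_sketch(0) - 0.1) < 1e-12  # starts at initial_lr
assert abs(_polynomial_lr_sketch(10000) - 0.01) < 1e-12  # ends at end_lr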
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A `LearningRateSchedule` that uses an inverse time decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a backend variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
        return initial_learning_rate / (1 + decay_rate * step / decay_steps)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
        return initial_learning_rate /
            (1 + decay_rate * floor(step / decay_steps))
```
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a Keras model when decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Args:
initial_learning_rate: A Python float. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
        staircase: Whether to apply decay in a discrete staircase, as
            opposed to continuous, fashion.
name: String. Optional name of the operation. Defaults to
`"InverseTimeDecay"`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name="InverseTimeDecay",
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
if self.decay_steps <= 0:
raise ValueError(
"Argument `decay_steps` must be > 0. "
f"Received: decay_steps={self.decay_steps}"
)
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
decay_steps = ops.cast(self.decay_steps, dtype)
decay_rate = ops.cast(self.decay_rate, dtype)
global_step_recomp = ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = ops.floor(p)
const = ops.cast(ops.array(1), dtype)
denom = ops.add(const, ops.multiply(decay_rate, p))
return ops.divide(initial_learning_rate, denom)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name,
}
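# A small numeric sketch of the inverse time formula above with
# illustrative values: the continuous variant divides by
# `1 + decay_rate * step / decay_steps`, the staircase variant floors the
# step ratio first.
_initial_lr, _decay_steps, _decay_rate, _step = 0.1, 10, 0.96, 5
_continuous_lr = _initial_lr / (1 + _decay_rate * _step / _decay_steps)  # ~0.0676
_staircase_lr = _initial_lr / (1 + _decay_rate * (_step // _decay_steps))  # 0.1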
@keras_export("keras.optimizers.schedules.CosineDecay")
class CosineDecay(LearningRateSchedule):
"""A `LearningRateSchedule` that uses a cosine decay with optional warmup.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
For the idea of a linear warmup of our learning rate,
see [Goyal et al.](https://arxiv.org/pdf/1706.02677.pdf).
When we begin training a model, we often want an initial increase in our
    learning rate followed by a decay. If `warmup_target` is not `None`, this
schedule applies a linear increase per optimizer step to our learning rate
from `initial_learning_rate` to `warmup_target` for a duration of
`warmup_steps`. Afterwards, it applies a cosine decay function taking our
learning rate from `warmup_target` to `warmup_target * alpha` for a
duration of `decay_steps`. If `warmup_target` is None we skip warmup and
our decay will take our learning rate from `initial_learning_rate` to
`initial_learning_rate * alpha`.
It requires a `step` value to compute the learning rate. You can
just pass a backend variable that you increment at each training step.
The schedule is a 1-arg callable that produces a warmup followed by a
decayed learning rate when passed the current optimizer step. This can be
useful for changing the learning rate value across different invocations of
optimizer functions.
Our warmup is computed as:
```python
def warmup_learning_rate(step):
completed_fraction = step / warmup_steps
        total_delta = warmup_target - initial_learning_rate
        return initial_learning_rate + completed_fraction * total_delta
```
And our decay is computed as:
```python
if warmup_target is None:
initial_decay_lr = initial_learning_rate
else:
initial_decay_lr = warmup_target
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_decay_lr * decayed
```
Example usage without warmup:
```python
decay_steps = 1000
initial_learning_rate = 0.1
lr_decayed_fn = keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps)
```
Example usage with warmup:
```python
decay_steps = 1000
initial_learning_rate = 0
warmup_steps = 1000
target_learning_rate = 0.1
lr_warmup_decayed_fn = keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps, warmup_target=target_learning_rate,
warmup_steps=warmup_steps
)
```
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A Python float. The initial learning rate.
decay_steps: A Python int. Number of steps to decay over.
alpha: A Python float. Minimum learning rate value for decay as a
fraction of `initial_learning_rate`.
name: String. Optional name of the operation. Defaults to
`"CosineDecay"`.
warmup_target: A Python float. The target learning rate for our
warmup phase. Will cast to the `initial_learning_rate` datatype.
            Setting to `None` will skip warmup and begin the decay phase from
            `initial_learning_rate`. Otherwise the scheduler will warm up from
`initial_learning_rate` to `warmup_target`.
warmup_steps: A Python int. Number of steps to warmup over.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name="CosineDecay",
warmup_target=None,
warmup_steps=0,
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
self.warmup_steps = warmup_steps
self.warmup_target = warmup_target
if self.decay_steps <= 0:
raise ValueError(
"Argument `decay_steps` must be > 0. "
f"Received: decay_steps={self.decay_steps}"
)
def _decay_function(self, step, decay_steps, decay_from_lr, dtype):
with ops.name_scope(self.name):
completed_fraction = ops.divide(step, decay_steps)
pi = ops.array(math.pi, dtype=dtype)
cosine_decayed = 0.5 * (
1.0 + ops.cos(ops.multiply(pi, completed_fraction))
)
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return ops.multiply(decay_from_lr, decayed)
def _warmup_function(
self, step, warmup_steps, warmup_target, initial_learning_rate
):
with ops.name_scope(self.name):
completed_fraction = step / warmup_steps
total_step_delta = warmup_target - initial_learning_rate
return total_step_delta * completed_fraction + initial_learning_rate
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
decay_steps = ops.cast(self.decay_steps, dtype)
global_step_recomp = ops.cast(step, dtype)
if self.warmup_target is None:
global_step_recomp = ops.minimum(
global_step_recomp, decay_steps
)
return self._decay_function(
global_step_recomp,
decay_steps,
initial_learning_rate,
dtype,
)
warmup_target = ops.cast(self.warmup_target, dtype)
warmup_steps = ops.cast(self.warmup_steps, dtype)
global_step_recomp = ops.minimum(
global_step_recomp, decay_steps + warmup_steps
)
return ops.cond(
global_step_recomp < warmup_steps,
lambda: self._warmup_function(
global_step_recomp,
warmup_steps,
warmup_target,
initial_learning_rate,
),
lambda: self._decay_function(
global_step_recomp - warmup_steps,
decay_steps,
warmup_target,
dtype,
),
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name,
"warmup_target": self.warmup_target,
"warmup_steps": self.warmup_steps,
}
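# A plain-Python sketch of the warmup-then-decay behaviour above, with
# illustrative defaults: below `warmup_steps` the rate ramps linearly from
# `initial_lr` to `warmup_target`; afterwards it follows the cosine decay
# from `warmup_target` down toward `warmup_target * alpha`.
def _cosine_with_warmup_sketch(
    step, initial_lr=0.0, warmup_target=0.1, warmup_steps=1000,
    decay_steps=1000, alpha=0.0,
):
    if step < warmup_steps:
        fraction = step / warmup_steps
        return initial_lr + fraction * (warmup_target - initial_lr)
    step = min(step - warmup_steps, decay_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * step / decay_steps))
    return warmup_target * ((1.0 - alpha) * cosine + alpha)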
@keras_export("keras.optimizers.schedules.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
"""A `LearningRateSchedule` that uses a cosine decay schedule with restarts.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a backend variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more
    steps and with `m_mul` times the initial learning rate as the new learning rate.
Example:
```python
first_decay_steps = 1000
lr_decayed_fn = (
keras.optimizers.schedules.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A Python float. The initial learning rate.
first_decay_steps: A Python integer. Number of steps to decay over.
t_mul: A Python float. Used to derive the number of iterations in
the i-th period.
m_mul: A Python float. Used to derive the initial learning rate of
the i-th period.
alpha: A Python float. Minimum learning rate value as a fraction of
the `initial_learning_rate`.
name: String. Optional name of the operation. Defaults to
`"SGDRDecay"`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name="SGDRDecay",
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
if self.first_decay_steps <= 0:
raise ValueError(
"Argument `first_decay_steps` must be > 0. "
f"Received: first_decay_steps={self.first_decay_steps}"
)
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
first_decay_steps = ops.cast(self.first_decay_steps, dtype)
alpha = ops.cast(self.alpha, dtype)
t_mul = ops.cast(self._t_mul, dtype)
m_mul = ops.cast(self._m_mul, dtype)
global_step_recomp = ops.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
# ops.log is sensitive to the precision of dtype, so we need
# the additional casting
i_restart = ops.floor(
ops.log(
ops.cast(
1.0 - completed_fraction * (1.0 - t_mul), dtype
)
)
/ ops.log(t_mul)
)
sum_r = ops.divide(
1.0 - ops.power(t_mul, i_restart), (1.0 - t_mul)
)
completed_fraction = ops.divide(
ops.subtract(completed_fraction, sum_r),
ops.power(t_mul, i_restart),
)
else:
i_restart = ops.floor(completed_fraction)
completed_fraction -= i_restart
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/schedules/__init__.py | keras/src/optimizers/schedules/__init__.py | from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/optimizers/schedules/learning_rate_schedule_test.py | keras/src/optimizers/schedules/learning_rate_schedule_test.py | """Tests for learning rate schedule API."""
import math
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import optimizers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.optimizers import schedules
class TestFitLRSchedulesFlow(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_fit_lr_correctness(self):
model = Sequential(
[
layers.Dense(
2, kernel_initializer="ones", bias_initializer="ones"
)
]
)
optimizer = optimizers.Adam(
learning_rate=schedules.ExponentialDecay(
initial_learning_rate=0.05, decay_steps=1, decay_rate=0.9
)
)
self.assertEqual(len(optimizer.variables), 1)
self.assertEqual(optimizer.variables[0], 0)
model.compile(optimizer=optimizer, loss="mse")
x = np.arange(32).reshape((16, 2))
y = np.arange(32).reshape((16, 2))
history = model.fit(x, y, epochs=3, batch_size=4, shuffle=False)
self.assertEqual(optimizer.variables[0], 4 * 3)
self.assertAllClose(
history.history["loss"],
[230.79457092285156, 128.30319213867188, 79.33648681640625],
rtol=5e-5,
tpu_atol=5e-3,
tpu_rtol=5e-3,
)
class ExponentialDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.ExponentialDecay(
initial_learning_rate=0.05,
decay_steps=10,
decay_rate=0.96,
staircase=True,
name="my_ed",
)
)
def test_continuous(self):
step = 5
decayed_lr = schedules.ExponentialDecay(0.05, 10, 0.96)
expected = 0.05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_staircase(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate due to staircase
expected = 0.1
self.assertAllClose(decayed_lr(step), expected, 1e-6)
expected = 0.1
step.assign(2)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
# Decayed learning rate
expected = 0.1 * 0.96 ** (100 // 3)
step.assign(100)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_variables(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate
step.assign(1)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
step.assign(2)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
# Decayed learning rate
step.assign(100)
expected = 0.1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class PiecewiseConstantDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.PiecewiseConstantDecay(
boundaries=[10, 20], values=[1, 2, 3], name="my_pcd"
)
)
def test_piecewise_values(self):
x = backend.Variable(-999.0)
decayed_lr = schedules.PiecewiseConstantDecay(
[100, 110, 120], [1.0, 0.1, 0.01, 0.001]
)
self.assertAllClose(decayed_lr(x), 1.0, 1e-6)
x.assign(100)
self.assertAllClose(decayed_lr(x), 1.0, 1e-6)
x.assign(105)
self.assertAllClose(decayed_lr(x), 0.1, 1e-6)
x.assign(110)
self.assertAllClose(decayed_lr(x), 0.1, 1e-6)
x.assign(120)
self.assertAllClose(decayed_lr(x), 0.01, 1e-6)
x.assign(999)
self.assertAllClose(decayed_lr(x), 0.001, 1e-6)
def test_boundary_values(self):
# Test casting boundaries from int32 to int64.
x_int64 = backend.Variable(0, dtype="int64", trainable=False)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = schedules.PiecewiseConstantDecay(boundaries, values)
self.assertAllClose(decayed_lr(x_int64), 0.4, 1e-6)
x_int64.assign(1)
self.assertAllClose(decayed_lr(x_int64), 0.4, 1e-6)
x_int64.assign(2)
self.assertAllClose(decayed_lr(x_int64), 0.5, 1e-6)
x_int64.assign(3)
self.assertAllClose(decayed_lr(x_int64), 0.6, 1e-6)
x_int64.assign(4)
self.assertAllClose(decayed_lr(x_int64), 0.7, 1e-6)
class LinearDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.PolynomialDecay(
initial_learning_rate=0.1,
decay_steps=100,
end_learning_rate=0.005,
power=1.0,
cycle=False,
name="my_ld",
)
)
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class SqrtDecayTest(testing.TestCase):
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(
lr, 10, end_lr, power=power, cycle=True
)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_begin_with_cycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = schedules.PolynomialDecay(lr, decay_steps, cycle=True)
expected = lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class InverseTimeDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.InverseTimeDecay(
initial_learning_rate=0.05,
decay_steps=10,
decay_rate=0.96,
staircase=True,
name="my_itd",
)
)
def test_decay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = backend.Variable(0.0)
decayed_lr = schedules.InverseTimeDecay(initial_lr, k, decay_rate)
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
step.assign(step + 1)
def test_staircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = backend.Variable(0.0)
decayed_lr = schedules.InverseTimeDecay(
initial_lr, k, decay_rate, staircase=True
)
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(decayed_lr(step), expected, 1e-6)
step.assign(step + 1)
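# Reference sketch (illustrative only; hypothetical helper): the initial
# rate is divided by a linearly growing denominator; staircase floors the
# step/decay_steps ratio so the rate only drops once per window.
def _inverse_time_decay_reference(
    initial_lr, decay_steps, decay_rate, step, staircase=False
):
    ratio = step // decay_steps if staircase else step / decay_steps
    return initial_lr / (1 + decay_rate * ratio)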
class CosineDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.CosineDecay(
initial_learning_rate=0.05,
decay_steps=10,
alpha=0.1,
warmup_target=0.2,
warmup_steps=2,
name="my_cd",
)
)
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def test_decay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(initial_lr, num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def linear_warmup(self, step, warmup_steps, initial_lr, target_lr):
completed_fraction = step / warmup_steps
total_delta = target_lr - initial_lr
        return initial_lr + completed_fraction * total_delta
def test_warmup(self):
warmup_steps = 1500
initial_lr = 0.0
target_lr = 10.0
for step in range(0, 1500, 250):
lr = schedules.CosineDecay(
initial_lr,
10,
warmup_target=target_lr,
warmup_steps=warmup_steps,
)
expected = self.linear_warmup(
step, warmup_steps, initial_lr, target_lr
)
self.assertAllClose(lr(step), expected)
def test_alpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(
initial_lr, num_training_steps, alpha
)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_float64(self):
num_training_steps = 1000
initial_lr = np.float64(1.0)
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(initial_lr, num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_warmup_decay(self):
warmup_steps = 2000
decay_steps = 1000
initial_lr = 0.0
target_lr = 10.0
for step in range(0, 3000, 250):
lr = schedules.CosineDecay(
initial_lr,
decay_steps,
warmup_target=target_lr,
warmup_steps=warmup_steps,
)
if step < warmup_steps + 1:
expected = self.linear_warmup(
step, warmup_steps, initial_lr, target_lr
)
else:
expected = target_lr * self.np_cosine_decay(
step - warmup_steps, decay_steps
)
self.assertAllClose(lr(step), expected)
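# Reference sketch (illustrative only; hypothetical helper) combining the
# two pieces checked by `test_warmup_decay`: a linear ramp from
# `initial_lr` to `warmup_target` over `warmup_steps`, then cosine decay
# from the target over `decay_steps`. Relies on the module-level `math`
# import.
def _warmup_cosine_reference(
    initial_lr, decay_steps, step, warmup_target, warmup_steps, alpha=0.0
):
    if step <= warmup_steps:
        return initial_lr + (warmup_target - initial_lr) * (
            step / warmup_steps
        )
    completed = min(step - warmup_steps, decay_steps) / decay_steps
    decay = 0.5 * (1.0 + math.cos(math.pi * completed))
    return warmup_target * ((1.0 - alpha) * decay + alpha)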
class CosineDecayRestartsTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.CosineDecayRestarts(
initial_learning_rate=0.05,
first_decay_steps=10,
alpha=0.1,
t_mul=3.0,
m_mul=4.0,
name="my_cdr",
)
)
def np_cosine_decay_restarts(
self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0
):
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def test_decay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps
)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_float64(self):
num_training_steps = 1000
initial_lr = np.float64(1.0)
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps
)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_alpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, alpha=alpha
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_mmul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, m_mul=m_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_tmul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, t_mul=t_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
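# Usage sketch (illustrative only): each restart multiplies the cycle
# length by `t_mul` and the peak by `m_mul`, as `np_cosine_decay_restarts`
# computes above. For example, with t_mul=2.0 and m_mul=0.5,
#   schedule = schedules.CosineDecayRestarts(
#       initial_learning_rate=0.1, first_decay_steps=100,
#       t_mul=2.0, m_mul=0.5,
#   )
#   schedule(100)  # start of the second cycle: peaks at 0.1 * 0.5 = 0.05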
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/dtype_policies/dtype_policy_map_test.py | keras/src/dtype_policies/dtype_policy_map_test.py | import numpy as np
import pytest
from keras.src import dtype_policies
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
class DTypePolicyMapTest(testing.TestCase):
def setUp(self):
super().setUp()
self._global_dtype_policy = dtype_policy()
def tearDown(self):
super().tearDown()
set_dtype_policy(self._global_dtype_policy)
@pytest.mark.requires_trainable_backend
def test_basic_usage(self):
        # Create a subclass that may mix dtype policies across its
        # sublayers.
# It is important to ensure that `dtype` is passed to sublayers and
# that each sublayer has a unique `name`.
@saving.register_keras_serializable()
class Subclass(layers.Layer):
def __init__(self, dtype=None, name="subclass", **kwargs):
super().__init__(dtype=dtype, name=name, **kwargs)
self.dense = layers.Dense(8, dtype=dtype, name=f"{name}_dense")
self.bn = layers.BatchNormalization(
dtype=dtype, name=f"{name}_bn"
)
self.relu = layers.ReLU(dtype=dtype, name=f"{name}_relu")
def call(self, inputs, training=None):
return self.relu(self.bn(self.dense(inputs), training=training))
def get_config(self):
                # Typically, we only need to record the quantized
                # policies in the `DTypePolicyMap`.
config = super().get_config()
dtype_policy_map = DTypePolicyMap()
for layer in self._flatten_layers():
if layer.quantization_mode is not None:
dtype_policy_map[layer.path] = layer.dtype_policy
if len(dtype_policy_map) > 0:
config.update({"dtype": dtype_policy_map})
return config
# Instantiate the model
inputs = layers.Input([4])
outputs = Subclass()(inputs)
model = models.Model(inputs, outputs)
        # Quantize the model to create a mix of dtype policies in sublayers.
model.quantize("int8")
for layer in model._flatten_layers():
if isinstance(layer, layers.Dense):
self.assertEqual(
layer.dtype_policy,
dtype_policies.QuantizedDTypePolicy("int8"),
)
elif isinstance(layer, layers.BatchNormalization):
self.assertEqual(
layer.dtype_policy, dtype_policies.DTypePolicy()
)
elif isinstance(layer, layers.ReLU):
self.assertEqual(
layer.dtype_policy, dtype_policies.DTypePolicy()
)
# Verify the output after saving and loading
x = np.random.uniform(size=[16, 4])
temp_dir = self.get_temp_dir()
y = model(x, training=False)
model.save(f"{temp_dir}/model.keras")
reloaded_model = saving.load_model(f"{temp_dir}/model.keras")
reloaded_y = reloaded_model(x, training=False)
self.assertAllClose(y, reloaded_y)
def test_add(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
dtype_policy_map["layer/dense_2"] = (
dtype_policies.QuantizedFloat8DTypePolicy("float8", "mixed_float16")
)
self.assertLen(dtype_policy_map, 3)
policy = dtype_policy_map["layer/dense_0"]
self.assertIsInstance(policy, dtype_policies.DTypePolicy)
self.assertEqual(policy.name, "bfloat16")
policy = dtype_policy_map["layer/dense_1"]
self.assertIsInstance(policy, dtype_policies.QuantizedDTypePolicy)
self.assertEqual(policy._source_name, "mixed_bfloat16")
self.assertEqual(policy.quantization_mode, "int8")
policy = dtype_policy_map["layer/dense_2"]
self.assertIsInstance(policy, dtype_policies.QuantizedFloat8DTypePolicy)
self.assertEqual(policy._source_name, "mixed_float16")
self.assertEqual(policy.quantization_mode, "float8")
with self.assertRaisesRegex(
ValueError, "layer/dense_0 already exist in the DTypePolicyMap"
):
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"float32"
)
with self.assertRaisesRegex(
ValueError, "Cannot interpret the assigned value."
):
dtype_policy_map["layer/dense_3"] = 123
def test_get(self):
# 1. Setup
bfloat16_policy = dtype_policies.DTypePolicy("bfloat16")
int8_policy = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
float32_policy = dtype_policies.DTypePolicy("float32")
float16_policy = dtype_policies.DTypePolicy("float16")
policy_map = DTypePolicyMap()
# Policy for an exact layer path
policy_map["model/encoder/layer_0/dense"] = bfloat16_policy
# Policy for a layer that is also a prefix of another layer's name
policy_map["model/encoder/attention/query"] = int8_policy
# Regex policies for entire scopes MUST include wildcards
policy_map["model/decoder/.*"] = float32_policy
policy_map["model/decoder/attention/.*"] = float16_policy
# 2. Test exact match
self.assertEqual(
policy_map["model/encoder/layer_0/dense"], bfloat16_policy
)
self.assertEqual(
policy_map["model/encoder/attention/query"], int8_policy
)
# 3. Test successful regex fallback (explicit wildcard)
# "model/decoder/.*" should match its children.
self.assertEqual(policy_map["model/decoder/layer_0"], float32_policy)
# 4. Test that partial matches are ignored
# The exact key "model/encoder/attention/query" should not match
# "model/encoder/attention/query_norm" without a wildcard.
self.assertEqual(
policy_map["model/encoder/attention/query_norm"],
policy_map.default_policy,
)
# A plain key "model/decoder" will not match "model/decoder/layer_0"
policy_map["model/decoder"] = bfloat16_policy # Add exact key
self.assertEqual(policy_map["model/decoder/layer_0"], float32_policy)
        # The exact key "model/decoder" is still matched directly
self.assertEqual(policy_map["model/decoder"], bfloat16_policy)
# 5. Test no match
self.assertEqual(
policy_map["model/embedding"], policy_map.default_policy
)
# 6. Test multiple regex matches causing a ValueError
# "model/decoder/attention/output" matches two regex patterns:
# - "model/decoder/.*"
# - "model/decoder/attention/.*"
with self.assertRaisesRegex(
ValueError,
"Path 'model/decoder/attention/output' matches multiple "
"dtype policy",
):
_ = policy_map["model/decoder/attention/output"]
def test_delete(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertEqual(
dtype_policy_map.pop("layer/dense_0"),
dtype_policies.DTypePolicy("bfloat16"),
)
with self.assertRaises(KeyError):
dtype_policy_map.pop("layer/dense_0")
        # Test `del`; subsequent lookups miss and fall back to the
        # default policy
del dtype_policy_map["layer/dense_1"]
self.assertEqual(
dtype_policy_map["layer/dense_1"], dtype_policy_map.default_policy
)
self.assertLen(dtype_policy_map, 0)
def test_len(self):
dtype_policy_map = DTypePolicyMap()
self.assertLen(dtype_policy_map, 0)
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertLen(dtype_policy_map, 2)
def test_iter(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertEqual(
list(dtype_policy_map.keys()), ["layer/dense_0", "layer/dense_1"]
)
keys = []
values = []
for k, v in dtype_policy_map.items():
keys.append(k)
values.append(v)
self.assertEqual(keys, ["layer/dense_0", "layer/dense_1"])
self.assertEqual(
values,
[
dtype_policies.DTypePolicy("bfloat16"),
dtype_policies.QuantizedDTypePolicy("int8", "mixed_bfloat16"),
],
)
def test_in(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertTrue("layer/dense_0" in dtype_policy_map)
self.assertTrue("layer/dense_1" in dtype_policy_map)
self.assertFalse("layer/dense_2" in dtype_policy_map)
def test_default_policy(self):
        # Test that an explicitly specified `default_policy` survives a
        # config round-trip
dtype_policy_map = DTypePolicyMap(default_policy="mixed_bfloat16")
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policy_map.get_config()
dtype_policy_map = DTypePolicyMap.from_config(config)
self.assertEqual(
dtype_policy_map["layer/dense_0"],
dtype_policies.DTypePolicy("mixed_bfloat16"),
)
self.assertEqual(
dtype_policy_map["layer/dense_1"],
dtype_policies.QuantizedDTypePolicy("int8", "mixed_bfloat16"),
)
# No hit, defers to `dtype_policy_map.default_policy`
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policy_map.default_policy
)
# Test that default_policy defers to `keras.config.dtype_policy()`
# during loading
set_dtype_policy("bfloat16")
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policy_map.get_config()
dtype_policy_map = DTypePolicyMap.from_config(config)
self.assertEqual(
dtype_policy_map["layer/dense_0"],
dtype_policies.DTypePolicy("bfloat16"),
)
self.assertEqual(
dtype_policy_map["layer/dense_1"],
dtype_policies.QuantizedDTypePolicy("int8", "bfloat16"),
)
# No hit, defers to `dtype_policy_map.default_policy` which is
# `keras.config.dtype_policy()`
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policy_map.default_policy
)
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policies.get("bfloat16")
)
def test_serialization(self):
dtype_policy_map = DTypePolicyMap(default_policy="mixed_bfloat16")
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policies.serialize(dtype_policy_map)
reloaded_dtype_policy_map = dtype_policies.deserialize(config)
self.assertEqual(
dtype_policy_map.default_policy,
reloaded_dtype_policy_map.default_policy,
)
for k, v in dtype_policy_map.items():
self.assertEqual(reloaded_dtype_policy_map[k], v)
# Test that config remains intact during deserialization
config = dtype_policy_map.get_config()
original_config = config.copy()
DTypePolicyMap.from_config(config)
self.assertDictEqual(config, original_config)
def test_repr(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
repr_str = repr(dtype_policy_map)
self.assertTrue("DTypePolicyMap" in repr_str)
self.assertTrue("default_policy" in repr_str)
self.assertTrue(
"mapping=[('layer/dense_0', 'mixed_bfloat16')]" in repr_str
)
def test_invalid_policy_map(self):
with self.assertRaisesRegex(
TypeError, "If specified, `policy_map` must be a dict."
):
DTypePolicyMap(policy_map=123)
with self.assertRaisesRegex(
TypeError, "If specified, `policy_map` must be a dict."
):
DTypePolicyMap(
policy_map=dtype_policies.DTypePolicy("mixed_bfloat16")
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/dtype_policies/dtype_policy.py | keras/src/dtype_policies/dtype_policy.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
QUANTIZATION_MODES = ("int8", "float8", "int4", "gptq")
@keras_export(
[
"keras.DTypePolicy",
"keras.dtype_policies.DTypePolicy",
"keras.mixed_precision.DTypePolicy", # Legacy
"keras.mixed_precision.Policy", # Legacy
]
)
class DTypePolicy:
"""A dtype policy for a Keras layer.
A dtype policy determines a layer's computation and variable dtypes. Each
layer has a policy. Policies can be passed to the `dtype` argument of layer
constructors, or a global policy can be set with
`keras.config.set_dtype_policy`.
Args:
name: The policy name, which determines the compute and variable dtypes.
Can be any dtype name, such as `"float32"` or `"float64"`,
            which causes both the compute and variable dtypes
            to be that dtype.
Can also be the string `"mixed_float16"` or `"mixed_bfloat16"`,
which causes the compute dtype to be `float16` or `bfloat16`
and the variable dtype to be `float32`.
Typically you only need to interact with dtype policies when using mixed
precision, which is the use of float16 or bfloat16 for computations and
float32 for variables. This is why the term `mixed_precision` appears in the
API name. Mixed precision can be enabled by passing `"mixed_float16"` or
`"mixed_bfloat16"` to `keras.mixed_precision.set_dtype_policy()`.
>>> keras.config.set_dtype_policy("mixed_float16")
>>> layer1 = keras.layers.Dense(10)
>>> layer1.dtype_policy # layer1 will automatically use mixed precision
<DTypePolicy "mixed_float16">
>>> # Can optionally override layer to use float32
>>> # instead of mixed precision.
>>> layer2 = keras.layers.Dense(10, dtype="float32")
>>> layer2.dtype_policy
<DTypePolicy "float32">
>>> # Set policy back to initial float32.
>>> keras.config.set_dtype_policy('float32')
In the example above, passing `dtype="float32"` to the layer is
equivalent to passing
`dtype=keras.config.DTypePolicy("float32")`.
In general, passing a dtype policy name to a layer is equivalent
to passing the corresponding policy, so it is never necessary
to explicitly construct a `DTypePolicy` object.
"""
def __init__(self, name=None):
# Use the global dtype policy if `name` is not specified
if name is None:
name = dtype_policy().name
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
self._quantization_mode = None
def _parse_name(self, name):
"""Parses a `DTypePolicy` name into a compute and variable dtype.
Args:
name: The name of the policy.
Returns:
The `(compute_dtype, variable_dtype)` pair.
"""
if not isinstance(name, str):
raise TypeError(
"'name' must be a string, such as 'mixed_float16'. "
f"Received: name={name} (of type {type(name)})"
)
if name == "mixed_float16":
return "float16", "float32"
elif name == "mixed_bfloat16":
return "bfloat16", "float32"
try:
dtype = backend.standardize_dtype(name)
return dtype, dtype
except ValueError:
raise ValueError(
f"Cannot convert '{name}' to a mixed precision "
"DTypePolicy. Valid policies include 'mixed_float16', "
"'mixed_bfloat16', and the name of any float dtype such as "
"'float32'."
)
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
        explicitly chooses a different dtype. If this is different from
        `DTypePolicy.compute_dtype`, layers will cast variables to
the compute dtype to avoid type errors.
Variable regularizers are run in the variable dtype, not the compute
dtype.
Returns:
The variable dtype of this policy, as a string.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in. Typically layers
output tensors with the compute dtype as well.
Note that even if the compute dtype is float16 or bfloat16, hardware
devices may not do individual adds, multiplies, and other fundamental
operations in float16 or bfloat16, but instead may do some of them in
float32 for numeric stability. The compute dtype is the dtype of the
inputs and outputs of the ops that the layer executes.
Internally, many ops will do certain internal calculations in
float32 or some other device-internal intermediate format with higher
precision than float16/bfloat16, to increase numeric stability.
Returns:
The compute dtype of this policy, as a string.
"""
return self._compute_dtype
@property
def name(self):
"""Returns the name of this policy."""
return self._name
@property
def quantization_mode(self):
"""The quantization mode of this policy.
Returns:
The quantization mode of this policy, as a string. If this policy is
not quantized, it will return `None`.
"""
return self._quantization_mode
def convert_input(self, x, autocast, dtype):
"""Converts the input dtype based on `autocast` and `dtype`.
Note that `x` can be a tensor, symbolic tensor or numpy array, and this
method will keep integer inputs untouched and only apply casting to
floats.
"""
dtype = backend.standardize_dtype(dtype)
if backend.is_tensor(x):
if self._should_cast(x, autocast, dtype):
x = backend.cast(x, dtype=dtype)
return x
elif backend.is_keras_tensor(x):
if self._should_cast(x, autocast, dtype):
x = ops.cast(x, dtype=dtype)
return x
elif hasattr(x, "__array__"):
try:
x = backend.convert_to_tensor(x)
except TypeError:
x = backend.convert_to_tensor(x, dtype=dtype)
if self._should_cast(x, autocast, dtype):
x = backend.cast(x, dtype=dtype)
return x
return x
def get_config(self):
return {"name": self.name}
@classmethod
def from_config(cls, config):
return cls(**config)
def __repr__(self):
class_name = self.__class__.__name__
if class_name == "FloatDTypePolicy":
class_name = "DTypePolicy"
return f'<{class_name} "{self._name}">'
def __eq__(self, other):
if self.__class__ in (DTypePolicy, FloatDTypePolicy):
if type(other) not in (DTypePolicy, FloatDTypePolicy):
return False
else:
if type(other) is not self.__class__:
return False
return self._name == other._name
def _should_cast(self, x, autocast, dtype):
x_dtype = backend.standardize_dtype(x.dtype)
if autocast and backend.is_float_dtype(x_dtype) and x_dtype != dtype:
return True
else:
return False
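# Behavior sketch (illustrative only, not part of the API surface): under
# a mixed policy the two dtypes differ, and `convert_input` only casts
# floating-point inputs when `autocast` is enabled.
#   policy = DTypePolicy("mixed_float16")
#   policy.compute_dtype   # -> "float16"
#   policy.variable_dtype  # -> "float32"
# An int32 tensor passed through `convert_input` is left untouched.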
@keras_export(
["keras.FloatDTypePolicy", "keras.dtype_policies.FloatDTypePolicy"]
)
class FloatDTypePolicy(DTypePolicy):
# An alias for `DTypePolicy`
pass
@keras_export("keras.dtype_policies.QuantizedDTypePolicy")
class QuantizedDTypePolicy(DTypePolicy):
def __init__(self, mode, source_name=None):
# Use the global dtype policy if `source_name` is not specified
if source_name is None:
source_name = dtype_policy().name
name = f"{mode}_from_{source_name}"
self._compute_dtype, self._variable_dtype = self._parse_name(
source_name
)
self._check_quantization_mode(mode, self._compute_dtype)
self._name = name
self._source_name = source_name
self._quantization_mode = mode
def __eq__(self, other):
if super().__eq__(other) is False:
return False
return (
self._quantization_mode == other._quantization_mode
and self._source_name == other._source_name
)
def get_config(self):
return {
"mode": self._quantization_mode,
"source_name": self._source_name,
}
def _check_quantization_mode(self, mode, compute_dtype):
if mode not in QUANTIZATION_MODES:
raise ValueError(
"Invalid quantization mode. "
f"Expected one of {QUANTIZATION_MODES}. "
f"Received: mode={mode}"
)
if compute_dtype == "float16" and mode == "int8":
raise ValueError(
f"Quantization mode='{mode}' doesn't work well with "
"compute_dtype='float16'."
)
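# Naming sketch (illustrative only): a quantized policy's name encodes
# both the mode and the float policy it derives from, e.g.
#   QuantizedDTypePolicy("int8", "mixed_bfloat16").name
#   # -> "int8_from_mixed_bfloat16"
# which `_get_quantized_dtype_policy_by_str` below parses back apart.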
@keras_export("keras.dtype_policies.QuantizedFloat8DTypePolicy")
class QuantizedFloat8DTypePolicy(QuantizedDTypePolicy):
default_amax_history_length = 1024
def __init__(self, mode, source_name=None, amax_history_length=1024):
super().__init__(mode=mode, source_name=source_name)
if not isinstance(amax_history_length, int):
raise TypeError(
"`amax_history_length` must be an integer. "
f"Received: amax_history_length={amax_history_length}"
)
self._amax_history_length = amax_history_length
@property
def amax_history_length(self):
"""The length of the amax history window.
This property is used for scaling factor computation in float8 training.
"""
return self._amax_history_length
def __eq__(self, other):
if super().__eq__(other) is False:
return False
return self._amax_history_length == other._amax_history_length
def get_config(self):
config = super().get_config()
config.update({"amax_history_length": self.amax_history_length})
return config
@keras_export("keras.dtype_policies.GPTQDTypePolicy")
class GPTQDTypePolicy(QuantizedDTypePolicy):
"""Quantized dtype policy for GPTQ quantization.
This policy helps propagate quantization settings for GPTQ
when loading a GPTQ quantized model in Keras format.
Args:
mode: The quantization mode. This should be a string in the format
`"gptq/<weight_bits>/<group_size>"`.
- `"gptq"`: The identifier for the quantization algorithm.
- `<weight_bits>`: Number of bits to quantize weights to.
Supported values are 2, 3, 4, and 8.
- `<group_size>`: The group size for quantization. Supported
values are -1 (for whole-tensor quantization) or any
positive integer. Typically a smaller group size leads
to better accuracy but slower speed.
Example: `"gptq/4/128"`.
source_name: The source dtype policy name, e.g. "float32".
"""
def __init__(
self,
mode,
source_name=None,
):
parts = mode.split("/")
expected_format = "'gptq/<weight_bits>/<group_size>'"
# Validate format
if len(parts) != 3 or parts[0] != "gptq":
raise ValueError(
"Invalid mode for GPTQDTypePolicy. Expected format "
f"{expected_format}, but got '{mode}'."
)
# Validate and cast weight_bits and group_size
try:
weight_bits = int(parts[1])
group_size = int(parts[2])
except ValueError:
raise ValueError(
"Invalid mode for GPTQDTypePolicy. <weight_bits> and "
"<group_size> must be integers. Expected format "
f"{expected_format}, but got '{mode}'."
)
# Validate supported values
if weight_bits not in [2, 3, 4, 8]:
raise ValueError(
"Invalid weight_bits in mode. Supported values are "
f"2, 3, 4, and 8, but got {weight_bits} from '{mode}'."
)
if group_size < -1 or group_size == 0:
raise ValueError(
"Invalid group_size in mode. Supported values are "
"-1 (whole-tensor) or a positive integer, "
f"but got {group_size} from '{mode}'."
)
base_mode = parts[0]
super().__init__(
mode=base_mode,
source_name=source_name,
)
self._name = f"{mode}_from_{source_name}"
self.mode = base_mode
self.weight_bits = weight_bits
self.group_size = group_size
def __eq__(self, other):
if super().__eq__(other) is False:
return False
return (
self.weight_bits == other.weight_bits
and self.group_size == other.group_size
)
def get_config(self):
config = super().get_config()
# Reconstruct the full mode string for serialization
mode = f"{self.mode}/{self.weight_bits}/{self.group_size}"
config.update({"mode": mode})
return config
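# Mode-string sketch (illustrative only): the GPTQ policy packs its
# hyperparameters into the mode string, e.g.
#   policy = GPTQDTypePolicy("gptq/4/128", "float32")
#   policy.weight_bits  # -> 4
#   policy.group_size   # -> 128
#   policy.name         # -> "gptq/4/128_from_float32"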
@keras_export(
[
"keras.config.set_dtype_policy",
"keras.mixed_precision.set_dtype_policy", # Legacy
"keras.mixed_precision.set_global_policy", # Legacy
]
)
def set_dtype_policy(policy):
"""Sets the default dtype policy globally.
Example:
>>> keras.config.set_dtype_policy("mixed_float16")
"""
if not isinstance(policy, DTypePolicy):
if isinstance(policy, str):
if policy.startswith(QUANTIZATION_MODES):
policy = _get_quantized_dtype_policy_by_str(policy)
else:
policy = DTypePolicy(policy)
else:
raise ValueError(
"Invalid `policy` argument. "
"Expected the string name of a policy "
"(such as 'mixed_float16') or a `DTypePolicy` "
f"instance. Received: policy={policy} "
f"(of type {type(policy)})"
)
global_state.set_global_attribute("dtype_policy", policy)
@keras_export(
[
"keras.config.dtype_policy",
"keras.mixed_precision.dtype_policy", # Legacy
"keras.mixed_precision.global_policy", # Legacy
]
)
def dtype_policy():
"""Returns the current default dtype policy object."""
policy = global_state.get_global_attribute("dtype_policy", None)
if policy is None:
policy = DTypePolicy(backend.floatx())
set_dtype_policy(policy)
return policy
def _get_quantized_dtype_policy_by_str(policy):
if not isinstance(policy, str):
raise TypeError(f"`policy` must be a string. Received: policy={policy}")
if not policy.startswith(QUANTIZATION_MODES):
raise ValueError(
"`policy` is incompatible with the current supported quantization."
)
split_name = policy.split("_from_")
if len(split_name) != 2:
raise ValueError(
"Cannot convert `policy` into a valid pair (`mode`, `source_name`) "
"to instantiate `QuantizedDTypePolicy`. "
f"Received: policy={policy}"
)
mode, source_name = split_name
if policy.startswith("int8") or policy.startswith("int4"):
return QuantizedDTypePolicy(mode, source_name)
elif policy.startswith("gptq"):
return GPTQDTypePolicy(mode, source_name)
elif policy.startswith("float8"):
return QuantizedFloat8DTypePolicy(mode, source_name)
else:
raise NotImplementedError
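# Resolution sketch (illustrative only): string identifiers are resolved
# by prefix and split on "_from_", e.g.
#   _get_quantized_dtype_policy_by_str("int8_from_mixed_bfloat16")
#   # -> QuantizedDTypePolicy("int8", "mixed_bfloat16")
#   _get_quantized_dtype_policy_by_str("float8_from_float32")
#   # -> QuantizedFloat8DTypePolicy("float8", "float32")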
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/dtype_policies/__init__.py | keras/src/dtype_policies/__init__.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import GPTQDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
DTypePolicyMap,
GPTQDTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...DTypePolicy'>
You can also specify `config` of the dtype policy to this function by
passing dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class
>>> identifier = {"class_name": "DTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...DTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return DTypePolicy(identifier)
try:
return DTypePolicy(backend.standardize_dtype(identifier))
    except Exception:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/dtype_policies/dtype_policy_map.py | keras/src/dtype_policies/dtype_policy_map.py | import re
from collections.abc import MutableMapping
from keras.src import dtype_policies
from keras.src.api_export import keras_export
from keras.src.dtype_policies import DTypePolicy
@keras_export(["keras.dtype_policies.DTypePolicyMap"])
class DTypePolicyMap(DTypePolicy, MutableMapping):
"""Dict-like object mapping layer paths to `DTypePolicy` instances.
`DTypePolicyMap` can be used in `get_config` in layers and subclasses to
support a complex configurations of dtype policies.
For example, we can modify `get_config` in `layers.MultiHeadAttention` as
follows to support the mixing of dtype policies, such as quantization.
```python
@keras.saving.register_keras_serializable("MyPackage")
class MyMultiHeadAttention(keras.layers.MultiHeadAttention):
def get_config(self):
config = super().get_config()
dtype_policy_map = dtype_policies.DTypePolicyMap()
for layer in self._flatten_layers():
if layer.dtype_policy.quantization_mode is not None:
dtype_policy_map[layer.path] = layer.dtype_policy
if len(dtype_policy_map) > 0:
config.update({"dtype": dtype_policy_map})
return config
```
Internally, `DTypePolicyMap` uses a string as a key and a `DTypePolicy`
as the value. Typically, the key used for querying is the `Layer.path`.
However, it is also possible to set a regex as the key. See the docstring of
`get` for more details.
Args:
default_policy: An optional `DTypePolicy` instance specifying the
default dtype policy. If not specified, the value will default to
`keras.config.dtype_policy()`.
policy_map: An optional dict that maps string to `DTypePolicy`
            instances. Defaults to `None`.
Example:
```python
>>> from keras.src import dtype_policies
>>> bfloat16 = dtype_policies.DTypePolicy("bfloat16")
>>> float16 = dtype_policies.DTypePolicy("float16")
>>> float32 = dtype_policies.DTypePolicy("float32")
>>> policy_map = DTypePolicyMap(default_policy=float32)
# Set policies using an exact path and a regex pattern.
# Note: "decoder" will only match the exact path, not its children.
>>> policy_map["encoder/layer_0/dense"] = bfloat16
>>> policy_map["encoder/.*"] = float16
>>> policy_map["decoder"] = bfloat16
# 1. An exact match is found and returned directly.
>>> policy_map["encoder/layer_0/dense"].name
'bfloat16'
# 2. A regex match is found for a child layer.
# It matches the "encoder/.*" pattern.
>>> policy_map["encoder/attention/query"].name
'float16'
# 3. No implicit prefix matching occurs.
# "decoder/attention" does not match the key "decoder".
# The default policy is returned.
>>> policy_map["decoder/attention"].name
'float32'
# 4. A ValueError is raised if a path matches multiple patterns.
>>> policy_map["encoder/attention/.*"] = bfloat16
# "encoder/attention/query" now matches two patterns:
# - "encoder/.*"
# - "encoder/attention/.*"
>>> try:
... policy_map["encoder/attention/query"]
... except ValueError as e:
... print(e)
Path 'encoder/attention/query' matches multiple dtype policy ..
```
"""
def __init__(self, default_policy=None, policy_map=None):
if isinstance(default_policy, DTypePolicyMap):
raise ValueError("`default_policy` cannot be a `DTypePolicyMap`.")
if policy_map is not None and not isinstance(policy_map, dict):
raise TypeError(
"If specified, `policy_map` must be a dict. "
f"Received: policy_map={policy_map} of type {type(policy_map)}"
)
self._default_policy_arg = default_policy
self._default_policy = dtype_policies.get(default_policy)
self._policy_map = policy_map or dict()
@property
def name(self):
return f"map_{self.default_policy._name}"
@property
def default_policy(self):
"""The default dtype policy.
If `default_policy` is not specified in the constructor, this property
will be `keras.config.dtype_policy()`.
"""
return dtype_policies.get(self._default_policy)
@property
def variable_dtype(self):
return self.default_policy.variable_dtype
@property
def compute_dtype(self):
return self.default_policy.compute_dtype
@property
def quantization_mode(self):
return self.default_policy.quantization_mode
def __getitem__(self, key):
"""Retrieves the corresponding `DTypePolicy` by the string key.
This method first attempts an exact key match. If no exact match is
found, it treats all keys in the map as regular expression patterns
and uses `re.fullmatch` to find a policy.
For example, to apply a policy to all sublayers of an `encoder` block,
the key should be explicitly set to `"encoder/.*"`. A key of
`"encoder"` will only match the layer with that exact path.
Args:
key: str. The key to query for a `DTypePolicy`.
Returns:
The corresponding `DTypePolicy`. If no match is found, this method
returns `self.default_policy`.
Raises:
ValueError: If the `key` matches more than one regex pattern in the
map.
Example:
```python
>>> from keras.src import dtype_policies
>>> bfloat16 = dtype_policies.DTypePolicy("bfloat16")
>>> float16 = dtype_policies.DTypePolicy("float16")
>>> float32 = dtype_policies.DTypePolicy("float32")
>>> policy_map = DTypePolicyMap(default_policy=float32)
# Set policies using an exact path and a regex pattern.
# Note: "decoder" will only match the exact path, not its children.
>>> policy_map["encoder/layer_0/dense"] = bfloat16
>>> policy_map["encoder/.*"] = float16
>>> policy_map["decoder"] = bfloat16
# 1. An exact match is found and returned directly.
>>> policy_map["encoder/layer_0/dense"].name
'bfloat16'
# 2. A regex match is found for a child layer.
# It matches the "encoder/.*" pattern.
>>> policy_map["encoder/attention/query"].name
'float16'
# 3. No implicit prefix matching occurs.
# "decoder/attention" does not match the key "decoder".
# The default policy is returned.
>>> policy_map["decoder/attention"].name
'float32'
# 4. A ValueError is raised if a path matches multiple patterns.
>>> policy_map["encoder/attention/.*"] = bfloat16
# "encoder/attention/query" now matches two patterns:
# - "encoder/.*"
# - "encoder/attention/.*"
>>> try:
... policy_map["encoder/attention/query"]
... except ValueError as e:
... print(e)
Path 'encoder/attention/query' matches multiple dtype policy ..
```
"""
# 1. Check for an exact match.
if key in self._policy_map:
return self._policy_map[key]
# 2. Fallback to a full regex match.
matching_keys = [
pattern
for pattern in self._policy_map
if re.fullmatch(pattern, key)
]
# 3. Handle cases based on the number of matches found.
if len(matching_keys) > 1:
raise ValueError(
f"Path '{key}' matches multiple dtype policy "
f"specification keys: {matching_keys}. Please make "
"sure each path only matches at most "
"one dtype policy specification key in the DTypePolicyMap."
)
elif len(matching_keys) == 1:
return self._policy_map[matching_keys[0]]
# 4. If there were no matches, return the default.
return self.default_policy
def __setitem__(self, key, policy):
"""Insert `DTypePolicy` to the `DTypePolicyMap`.
Args:
key: String key for the `DTypePolicy`.
policy: The `DTypePolicy`.
"""
if key in self._policy_map:
raise ValueError(
f"{key} already exist in the DTypePolicyMap with "
f"value {self._policy_map[key]}. Please make sure to "
"not use duplicated keys."
)
try:
policy = dtype_policies.get(policy)
except Exception:
raise ValueError(
"Cannot interpret the assigned value by "
"`keras.dtype_policies.get`. "
f"Received: {policy} of type {type(policy)}"
)
self._policy_map[key] = policy
def __delitem__(self, key):
        # Let the dict handle the missing-key error.
return self._policy_map.pop(key)
def __contains__(self, key):
return key in self._policy_map
def get_config(self):
from keras.src.saving import serialization_lib
policy_map = self._policy_map
if self._default_policy_arg is None:
# `default_policy=None` enables us to defer to
# `keras.config.dtype_policy()` during loading.
            # To support this feature, we set `_name` and `_source_name` to
# `None` in `DTypePolicy` and `QuantizedDTypePolicy`,
# respectively.
for policy in policy_map.values():
if isinstance(policy, dtype_policies.QuantizedDTypePolicy):
policy._name = None
policy._source_name = None
elif isinstance(policy, dtype_policies.DTypePolicy):
policy._name = None
return {
"default_policy": self._default_policy_arg,
"policy_map": serialization_lib.serialize_keras_object(policy_map),
}
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.src.saving import serialization_lib
config = config.copy()
config["policy_map"] = serialization_lib.deserialize_keras_object(
config["policy_map"], custom_objects=custom_objects
)
return cls(**config)
def __len__(self):
return len(self._policy_map)
def __iter__(self):
return iter(self._policy_map)
def __repr__(self):
default_policy = (
self._default_policy.name
if self._default_policy is not None
else None
)
mapping = []
for k, v in self._policy_map.items():
mapping.append((k, v.name))
return (
f"<DTypePolicyMap at {hex(id(self))} "
f"default_policy={default_policy}, "
f"mapping={mapping}>"
)
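# Lookup-order sketch (illustrative only): `__getitem__` resolves a path
# in three stages -- exact key, then `re.fullmatch` against every key,
# then the default policy.
#   policy_map = DTypePolicyMap()
#   policy_map["encoder/.*"] = dtype_policies.DTypePolicy("float16")
#   policy_map["encoder/dense"]  # regex hit -> float16
#   policy_map["decoder/dense"]  # no hit -> policy_map.default_policy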
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/dtype_policies/dtype_policy_test.py | keras/src/dtype_policies/dtype_policy_test.py | from absl.testing import parameterized
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.testing import test_case
class DTypePolicyTest(test_case.TestCase):
"""Test `DTypePolicy`.
    In the tests, we also test `FloatDTypePolicy` for historical reasons.
"""
def setUp(self):
"""Record the global dtype policy before each test."""
super().setUp()
self._global_dtype_policy = dtype_policy()
def tearDown(self):
        """Restore the global dtype policy after each test."""
        super().tearDown()
set_dtype_policy(self._global_dtype_policy)
def test_initialization_valid_name(self):
"""Test initialization with a valid name."""
policy = DTypePolicy("mixed_float16")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.variable_dtype, "float32")
policy = FloatDTypePolicy("mixed_float16")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.variable_dtype, "float32")
@parameterized.named_parameters(
("float32", "float32", "float32", "float32"),
("float16", "float16", "float16", "float16"),
("bfloat16", "bfloat16", "bfloat16", "bfloat16"),
("mixed_float16", "mixed_float16", "float16", "float32"),
("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"),
)
def test_initialization_from_global(
self,
global_dtype_policy,
expected_compute_dtype,
expected_variable_dtype,
):
set_dtype_policy(global_dtype_policy)
policy = DTypePolicy(name=None)
self.assertEqual(policy.name, global_dtype_policy)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
policy = FloatDTypePolicy(name=None)
self.assertEqual(policy.name, global_dtype_policy)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
def test_initialization_invalid_name(self):
"""Test initialization with an invalid name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("invalid_name")
with self.assertRaisesRegex(ValueError, "Cannot convert"):
FloatDTypePolicy("invalid_name")
def test_initialization_non_string_name(self):
"""Test initialization with a non-string name."""
with self.assertRaisesRegex(TypeError, "'name' must be a string"):
DTypePolicy(123)
with self.assertRaisesRegex(TypeError, "'name' must be a string"):
FloatDTypePolicy(123)
def test_properties_mixed_float16(self):
"""Test properties for 'mixed_float16'."""
policy = DTypePolicy("mixed_float16")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.variable_dtype, "float32")
policy = FloatDTypePolicy("mixed_float16")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.variable_dtype, "float32")
def test_properties_mixed_bfloat16(self):
"""Test properties for 'mixed_bfloat16'."""
policy = DTypePolicy("mixed_bfloat16")
self.assertEqual(policy.compute_dtype, "bfloat16")
self.assertEqual(policy.variable_dtype, "float32")
policy = FloatDTypePolicy("mixed_bfloat16")
self.assertEqual(policy.compute_dtype, "bfloat16")
self.assertEqual(policy.variable_dtype, "float32")
def test_initialization_with_invalid_name_behaviour(self):
"""Test initialization behavior with an invalid name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("invalid_name")
with self.assertRaisesRegex(ValueError, "Cannot convert"):
FloatDTypePolicy("invalid_name")
def test_properties(self):
"""Test variable_dtype, compute_dtype, and name properties."""
policy = DTypePolicy("mixed_float16")
self.assertEqual(policy.variable_dtype, "float32")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.name, "mixed_float16")
self.assertIsNone(policy.quantization_mode)
policy = FloatDTypePolicy("mixed_float16")
self.assertEqual(policy.variable_dtype, "float32")
self.assertEqual(policy.compute_dtype, "float16")
self.assertEqual(policy.name, "mixed_float16")
self.assertIsNone(policy.quantization_mode)
def test_properties_uint8(self):
"""Test properties for 'uint8'."""
policy = DTypePolicy("uint8")
self.assertEqual(policy.compute_dtype, "uint8")
self.assertEqual(policy.variable_dtype, "uint8")
self.assertEqual(policy.name, "uint8")
policy = FloatDTypePolicy("uint8")
self.assertEqual(policy.compute_dtype, "uint8")
self.assertEqual(policy.variable_dtype, "uint8")
self.assertEqual(policy.name, "uint8")
def test_repr(self):
"""Test __repr__ method."""
policy = DTypePolicy("mixed_float16")
self.assertEqual(repr(policy), '<DTypePolicy "mixed_float16">')
policy = FloatDTypePolicy("mixed_float16")
self.assertEqual(repr(policy), '<DTypePolicy "mixed_float16">')
def test_get_config_from_config(self):
"""Test get_config and from_config methods."""
# Test DTypePolicy
policy = DTypePolicy("mixed_float16")
config = policy.get_config()
self.assertEqual(config, {"name": "mixed_float16"})
new_policy = DTypePolicy.from_config(config)
self.assertEqual(new_policy.name, "mixed_float16")
# Test FloatDTypePolicy
policy = FloatDTypePolicy("mixed_float16")
config = policy.get_config()
self.assertEqual(config, {"name": "mixed_float16"})
new_policy = FloatDTypePolicy.from_config(config)
self.assertEqual(new_policy.name, "mixed_float16")
def test_serialization(self):
# Test DTypePolicy
policy = DTypePolicy("mixed_float16")
config = serialize(policy)
reloaded_policy = deserialize(config)
self.assertEqual(policy.name, reloaded_policy.name)
reloaded_policy = get(config)
self.assertEqual(policy.name, reloaded_policy.name)
# Test FloatDTypePolicy
policy = FloatDTypePolicy("mixed_float16")
config = serialize(policy)
reloaded_policy = deserialize(config)
self.assertEqual(policy.name, reloaded_policy.name)
reloaded_policy = get(config)
self.assertEqual(policy.name, reloaded_policy.name)
def test_python_serialization(self):
"""Test builtin serialization methods."""
import copy
import pickle
# Test DTypePolicy
policy = DTypePolicy("mixed_float16")
# copy.deepcopy
copied_policy = copy.deepcopy(policy)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
# copy.copy
copied_policy = copy.copy(policy)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
# pickle
temp_dir = self.get_temp_dir()
with open(f"{temp_dir}/policy.pickle", "wb") as f:
pickle.dump(policy, f)
with open(f"{temp_dir}/policy.pickle", "rb") as f:
copied_policy = pickle.load(f)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
# Test FloatDTypePolicy
policy = FloatDTypePolicy("mixed_float16")
# copy.deepcopy
copied_policy = copy.deepcopy(policy)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
# copy.copy
copied_policy = copy.copy(policy)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
# pickle
temp_dir = self.get_temp_dir()
with open(f"{temp_dir}/policy.pickle", "wb") as f:
pickle.dump(policy, f)
with open(f"{temp_dir}/policy.pickle", "rb") as f:
copied_policy = pickle.load(f)
self.assertEqual(repr(copied_policy), '<DTypePolicy "mixed_float16">')
def test_eq(self):
policy = DTypePolicy("mixed_bfloat16")
# Test True
self.assertEqual(policy, DTypePolicy("mixed_bfloat16"))
self.assertEqual(policy, FloatDTypePolicy("mixed_bfloat16"))
# Test False
self.assertNotEqual(policy, "mixed_float16")
self.assertNotEqual(
policy, QuantizedDTypePolicy("int8", "mixed_bfloat16")
)
class QuantizedDTypePolicyTest(test_case.TestCase):
def setUp(self):
"""Record the global dtype policy before each test."""
super().setUp()
self._global_dtype_policy = dtype_policy()
def tearDown(self):
        """Restore the global dtype policy after each test."""
        super().tearDown()
set_dtype_policy(self._global_dtype_policy)
@parameterized.named_parameters(
("float32", "float32", "float32", "float32"),
("bfloat16", "bfloat16", "bfloat16", "bfloat16"),
("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"),
)
def test_initialization_for_int8(
self, source_name, expected_compute_dtype, expected_variable_dtype
):
name = f"int8_from_{source_name}"
policy = QuantizedDTypePolicy(mode="int8", source_name=source_name)
self.assertEqual(policy.name, name)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
self.assertEqual(repr(policy), f'<QuantizedDTypePolicy "{name}">')
@parameterized.named_parameters(
("float32", "float32", "float32", "float32"),
("bfloat16", "bfloat16", "bfloat16", "bfloat16"),
("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"),
)
def test_initialization_for_int8_from_global(
self,
global_dtype_policy,
expected_compute_dtype,
expected_variable_dtype,
):
set_dtype_policy(global_dtype_policy)
expected_name = f"int8_from_{global_dtype_policy}"
policy = QuantizedDTypePolicy(mode="int8", source_name=None)
self.assertEqual(policy.name, expected_name)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
@parameterized.named_parameters(
("float32", "float32", "float32", "float32"),
("float16", "float16", "float16", "float16"),
("bfloat16", "bfloat16", "bfloat16", "bfloat16"),
("mixed_float16", "mixed_float16", "float16", "float32"),
("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"),
)
def test_initialization_for_float8(
self, source_name, expected_compute_dtype, expected_variable_dtype
):
name = f"float8_from_{source_name}"
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name=source_name
)
self.assertEqual(policy.name, name)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
self.assertEqual(repr(policy), f'<QuantizedFloat8DTypePolicy "{name}">')
@parameterized.named_parameters(
("float32", "float32", "float32", "float32"),
("float16", "float16", "float16", "float16"),
("bfloat16", "bfloat16", "bfloat16", "bfloat16"),
("mixed_float16", "mixed_float16", "float16", "float32"),
("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"),
)
def test_initialization_for_float8_from_global(
self,
global_dtype_policy,
expected_compute_dtype,
expected_variable_dtype,
):
set_dtype_policy(global_dtype_policy)
expected_name = f"float8_from_{global_dtype_policy}"
policy = QuantizedFloat8DTypePolicy(mode="float8", source_name=None)
self.assertEqual(policy.name, expected_name)
self.assertEqual(policy.compute_dtype, expected_compute_dtype)
self.assertEqual(policy.variable_dtype, expected_variable_dtype)
@parameterized.named_parameters(
("abc", "abc"),
("abc_from_def", "def"),
)
def test_initialization_with_invalid_name(self, invalid_name):
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedDTypePolicy(mode="int8", source_name=invalid_name)
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedFloat8DTypePolicy(mode="float8", source_name=invalid_name)
@parameterized.named_parameters(
("int7", "int7"),
("float7", "float7"),
)
def test_initialization_with_invalid_mode(self, invalid_mode):
with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
QuantizedDTypePolicy(mode=invalid_mode)
with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
QuantizedFloat8DTypePolicy(mode=invalid_mode)
@parameterized.named_parameters(
("int8_from_float16", "float16"),
("int8_from_mixed_float16", "mixed_float16"),
)
def test_initialization_with_invalid_compute_dtype(self, invalid_name):
with self.assertRaisesRegex(ValueError, "doesn't work well"):
QuantizedDTypePolicy(mode="int8", source_name=invalid_name)
def test_initialization_non_string_name(self):
"""Test initialization with a non-string name."""
with self.assertRaisesRegex(TypeError, "'name' must be a string"):
QuantizedDTypePolicy(mode="int8", source_name=123)
with self.assertRaisesRegex(TypeError, "'name' must be a string"):
QuantizedFloat8DTypePolicy(mode="float8", source_name=123)
def test_properties(self):
# Test int8
policy = QuantizedDTypePolicy(mode="int8", source_name="mixed_bfloat16")
self.assertEqual(policy.variable_dtype, "float32")
self.assertEqual(policy.compute_dtype, "bfloat16")
self.assertEqual(policy.name, "int8_from_mixed_bfloat16")
self.assertEqual(policy.quantization_mode, "int8")
# Test float8
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name="mixed_bfloat16"
)
self.assertEqual(policy.variable_dtype, "float32")
self.assertEqual(policy.compute_dtype, "bfloat16")
self.assertEqual(policy.name, "float8_from_mixed_bfloat16")
self.assertEqual(policy.quantization_mode, "float8")
self.assertEqual(policy.amax_history_length, 1024)
# Test float8 with amax_history_length
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name="mixed_bfloat16", amax_history_length=512
)
self.assertEqual(policy.amax_history_length, 512)
# Test float8 default_amax_history_length
self.assertEqual(
QuantizedFloat8DTypePolicy.default_amax_history_length, 1024
)
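    # Note: as exercised above, `amax_history_length` sets the size of the
    # rolling window of absolute-max statistics used for float8 scaling; it
    # defaults to `default_amax_history_length` (1024) and can be overridden
    # per policy.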
def test_invalid_properties_for_float8(self):
with self.assertRaisesRegex(TypeError, "must be an integer."):
QuantizedFloat8DTypePolicy(
mode="float8", source_name="float32", amax_history_length="512"
)
with self.assertRaisesRegex(TypeError, "must be an integer."):
QuantizedFloat8DTypePolicy(
mode="float8", source_name="float32", amax_history_length=512.0
)
def test_get_config_from_config(self):
"""Test get_config and from_config methods."""
# Test QuantizedDTypePolicy
policy = QuantizedDTypePolicy(mode="int8", source_name="mixed_bfloat16")
config = policy.get_config()
self.assertEqual(
config, {"mode": "int8", "source_name": "mixed_bfloat16"}
)
new_policy = QuantizedDTypePolicy.from_config(config)
self.assertEqual(new_policy.name, "int8_from_mixed_bfloat16")
# Test QuantizedFloat8DTypePolicy
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name="mixed_bfloat16"
)
config = policy.get_config()
self.assertEqual(
config,
{
"mode": "float8",
"source_name": "mixed_bfloat16",
"amax_history_length": 1024,
},
)
new_policy = QuantizedFloat8DTypePolicy.from_config(config)
self.assertEqual(new_policy.name, "float8_from_mixed_bfloat16")
def test_serialization(self):
# Test QuantizedDTypePolicy
policy = QuantizedDTypePolicy(mode="int8", source_name="float32")
config = serialize(policy)
reloaded_policy = deserialize(config)
self.assertEqual(policy.name, reloaded_policy.name)
reloaded_policy = get(config)
self.assertEqual(policy.name, reloaded_policy.name)
# Test QuantizedFloat8DTypePolicy
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name="float32"
)
config = serialize(policy)
reloaded_policy = deserialize(config)
self.assertEqual(policy.name, reloaded_policy.name)
reloaded_policy = get(config)
self.assertEqual(policy.name, reloaded_policy.name)
@parameterized.named_parameters(
(
"int8_from_mixed_bfloat16",
"int8",
"mixed_bfloat16",
'<QuantizedDTypePolicy "int8_from_mixed_bfloat16">',
),
(
"float8_from_mixed_bfloat16",
"float8",
"mixed_bfloat16",
'<QuantizedFloat8DTypePolicy "float8_from_mixed_bfloat16">',
),
)
def test_python_serialization(self, mode, source_name, repr_str):
import copy
import pickle
if mode == "int8":
policy = QuantizedDTypePolicy(mode=mode, source_name=source_name)
else:
policy = QuantizedFloat8DTypePolicy(
mode=mode, source_name=source_name, amax_history_length=123
)
# copy.deepcopy
copied_policy = copy.deepcopy(policy)
self.assertEqual(repr(copied_policy), repr_str)
if mode == "float8":
self.assertEqual(copied_policy.amax_history_length, 123)
# copy.copy
copied_policy = copy.copy(policy)
self.assertEqual(repr(copied_policy), repr_str)
if mode == "float8":
self.assertEqual(copied_policy.amax_history_length, 123)
# pickle
temp_dir = self.get_temp_dir()
with open(f"{temp_dir}/policy.pickle", "wb") as f:
pickle.dump(policy, f)
with open(f"{temp_dir}/policy.pickle", "rb") as f:
copied_policy = pickle.load(f)
self.assertEqual(repr(copied_policy), repr_str)
if mode == "float8":
self.assertEqual(copied_policy.amax_history_length, 123)
def test_serialization_for_float8(self):
policy = QuantizedFloat8DTypePolicy(
mode="float8", source_name="mixed_float16"
)
config = serialize(policy)
reloaded_policy = deserialize(config)
self.assertEqual(policy.name, reloaded_policy.name)
self.assertEqual(
policy.amax_history_length, reloaded_policy.amax_history_length
)
# Test `dtype_policies.get`
reloaded_policy = get(config)
self.assertEqual(policy.name, reloaded_policy.name)
self.assertEqual(
policy.amax_history_length, reloaded_policy.amax_history_length
)
def test_eq(self):
policy = QuantizedDTypePolicy("int8", "mixed_bfloat16")
# Test True
self.assertEqual(policy, QuantizedDTypePolicy("int8", "mixed_bfloat16"))
# Test False
self.assertNotEqual(policy, "mixed_bfloat16")
self.assertNotEqual(policy, DTypePolicy("mixed_bfloat16"))
self.assertNotEqual(
policy, QuantizedFloat8DTypePolicy("float8", "mixed_bfloat16")
)
@parameterized.named_parameters(
("int8_from_mixed_bfloat16", "int8_from_mixed_bfloat16"),
("float8_from_mixed_bfloat16", "float8_from_mixed_bfloat16"),
)
def test_get_quantized_dtype_policy_by_str(self, name):
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
policy = _get_quantized_dtype_policy_by_str(name)
self.assertEqual(policy.name, name)
def test_invalid_get_quantized_dtype_policy_by_str(self):
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
with self.assertRaisesRegex(TypeError, "must be a string."):
_get_quantized_dtype_policy_by_str(123)
with self.assertRaisesRegex(
ValueError,
"is incompatible with the current supported quantization.",
):
_get_quantized_dtype_policy_by_str("float7")
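# Sketch of how a quantized policy string decomposes, per the tests above
# (illustrative, not an exhaustive spec): "<mode>_from_<source_name>", e.g.
#
#     "int8_from_mixed_bfloat16"   -> mode="int8",   source_name="mixed_bfloat16"
#     "float8_from_mixed_bfloat16" -> mode="float8", source_name="mixed_bfloat16"
#
# Strings whose mode prefix falls outside the supported set (e.g. "float7")
# are rejected by `_get_quantized_dtype_policy_by_str`.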
class DTypePolicyGlobalFunctionsTest(test_case.TestCase):
def setUp(self):
"""Reset the global dtype policy before each test."""
set_dtype_policy("float32")
def test_set_dtype_policy_valid_string(self):
"""Test set_dtype_policy with a valid string."""
set_dtype_policy("mixed_float16")
policy = dtype_policy()
self.assertEqual(policy.name, "mixed_float16")
def test_set_dtype_policy_valid_string_quantized(self):
"""Test set_dtype_policy with a valid string."""
set_dtype_policy("int8_from_mixed_bfloat16")
policy = dtype_policy()
self.assertEqual(policy.name, "int8_from_mixed_bfloat16")
def test_set_dtype_policy_valid_policy(self):
"""Test set_dtype_policy with a valid DTypePolicy object."""
policy_obj = DTypePolicy("mixed_float16")
set_dtype_policy(policy_obj)
policy = dtype_policy()
self.assertEqual(policy.name, "mixed_float16")
def test_set_dtype_policy_valid_policy_quantized(self):
"""Test set_dtype_policy with a valid QuantizedDTypePolicy object."""
policy_obj = QuantizedDTypePolicy(
mode="int8", source_name="mixed_bfloat16"
)
set_dtype_policy(policy_obj)
policy = dtype_policy()
self.assertEqual(policy.name, "int8_from_mixed_bfloat16")
def test_set_dtype_policy_invalid(self):
"""Test set_dtype_policy with an invalid input."""
with self.assertRaisesRegex(ValueError, "Invalid `policy` argument"):
set_dtype_policy(12345)
def test_dtype_policy_default(self):
"""Test dtype_policy default value."""
policy = dtype_policy()
self.assertEqual(policy.name, "float32")
def test_get_valid_policy(self):
policy = get("bfloat16")
self.assertEqual(policy.name, "bfloat16")
policy = get("mixed_float16")
self.assertEqual(policy.name, "mixed_float16")
policy = get(DTypePolicy("bfloat16"))
self.assertEqual(policy.name, "bfloat16")
policy = get(FloatDTypePolicy("mixed_float16"))
self.assertEqual(policy.name, "mixed_float16")
def test_get_valid_policy_quantized(self):
policy = get("int8_from_mixed_bfloat16")
self.assertEqual(policy.name, "int8_from_mixed_bfloat16")
policy = get("float8_from_float32")
self.assertEqual(policy.name, "float8_from_float32")
policy = get(QuantizedDTypePolicy("int8", "mixed_bfloat16"))
self.assertEqual(policy.name, "int8_from_mixed_bfloat16")
policy = get(QuantizedFloat8DTypePolicy("float8", "mixed_float16"))
self.assertEqual(policy.name, "float8_from_mixed_float16")
def test_get_invalid_policy(self):
with self.assertRaisesRegex(ValueError, "Cannot convert"):
get("mixed_bfloat15")
with self.assertRaisesRegex(
ValueError, "Cannot interpret `dtype` argument."
):
get(123)
def test_get_invalid_policy_quantized(self):
with self.assertRaisesRegex(ValueError, "Cannot convert"):
get("int8_from_mixed_bfloat15")
with self.assertRaisesRegex(ValueError, "Cannot convert"):
get("int8_from_")
with self.assertRaisesRegex(
ValueError, "Cannot convert `policy` into a valid pair"
):
get("int8_abc_")
class DTypePolicyEdgeCasesTest(test_case.TestCase):
def test_empty_name(self):
"""Test initialization with an empty name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("")
def test_special_character_name(self):
"""Test initialization with special characters in the name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("@mixed_float16!")
def test_very_long_name(self):
"""Test initialization with a very long name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("mixed_float16" * 100)
def test_almost_valid_name(self):
"""Test initialization with a name close to a valid one."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
DTypePolicy("mixed_float15")
class QuantizedDTypePolicyEdgeCasesTest(test_case.TestCase):
def test_empty_name(self):
"""Test initialization with an empty name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedDTypePolicy(mode="int8", source_name="")
def test_special_character_name(self):
"""Test initialization with special characters in the name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedDTypePolicy(
mode="int8", source_name="@int8_from_mixed_bfloat16!"
)
def test_very_long_name(self):
"""Test initialization with a very long name."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedDTypePolicy(
mode="int8", source_name="int8_from_mixed_bfloat16" * 100
)
def test_almost_valid_name(self):
"""Test initialization with a name close to a valid one."""
with self.assertRaisesRegex(ValueError, "Cannot convert"):
QuantizedDTypePolicy(
mode="int8", source_name="int7_from_mixed_bfloat16"
)
class DTypePolicyGlobalFunctionsEdgeCasesTest(test_case.TestCase):
def setUp(self):
"""Reset the global dtype policy before each test."""
set_dtype_policy("float32")
def test_set_policy_multiple_times(self):
"""Test setting the policy multiple times in a row."""
set_dtype_policy("mixed_float16")
policy = dtype_policy()
self.assertEqual(policy.name, "mixed_float16")
set_dtype_policy("float32")
policy = dtype_policy()
self.assertEqual(policy.name, "float32")
def test_set_policy_none(self):
"""Test setting the policy to None."""
with self.assertRaisesRegex(ValueError, "Invalid `policy` argument"):
set_dtype_policy(None)
class GPTQConfigErrorHandlingTest(test_case.TestCase):
"""Test error handling in GPTQConfig."""
def test_invalid_weight_bits(self):
with self.assertRaisesRegex(ValueError, "Unsupported weight_bits"):
GPTQConfig(
dataset=None,
tokenizer=None,
weight_bits=5,
)
def test_negative_num_samples(self):
with self.assertRaisesRegex(
ValueError, "num_samples must be a positive integer."
):
GPTQConfig(
dataset=None,
tokenizer=None,
num_samples=-10,
)
def test_zero_sequence_length(self):
with self.assertRaisesRegex(
ValueError, "sequence_length must be a positive integer."
):
GPTQConfig(
dataset=None,
tokenizer=None,
sequence_length=0,
)
def test_invalid_hessian_damping(self):
with self.assertRaisesRegex(
ValueError, "hessian_damping must be between 0 and 1."
):
GPTQConfig(
dataset=None,
tokenizer=None,
hessian_damping=1.5,
)
def test_invalid_group_size(self):
with self.assertRaisesRegex(
ValueError, "Invalid group_size. Supported values are -1"
):
GPTQConfig(
dataset=None,
tokenizer=None,
group_size=0,
)
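    # For contrast with the failure cases above, a hypothetical valid
    # construction might look like this (a hedged sketch; the concrete
    # argument values are assumptions, not asserted by these tests):
    #
    #     GPTQConfig(
    #         dataset=calibration_texts,  # assumed iterable of text samples
    #         tokenizer=tokenizer,        # assumed tokenizer object
    #         weight_bits=4,              # 5 is rejected above
    #         num_samples=128,            # must be a positive integer
    #         sequence_length=512,        # must be a positive integer
    #         hessian_damping=0.01,       # must lie between 0 and 1
    #         group_size=-1,              # 0 is rejected above
    #     )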
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/losses/losses.py | keras/src/losses/losses.py | import warnings
from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.losses.loss import Loss
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.saving import serialization_lib
from keras.src.utils.numerical_utils import build_pos_neg_masks
from keras.src.utils.numerical_utils import normalize
class LossFunctionWrapper(Loss):
def __init__(
self,
fn,
reduction="sum_over_batch_size",
name=None,
dtype=None,
**kwargs,
):
super().__init__(name=name, reduction=reduction, dtype=dtype)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
y_true_y_pred = tree.map_structure(
squeeze_or_expand_to_same_rank, y_true, y_pred
)
y_true = tree.map_structure_up_to(y_true, lambda x: x[0], y_true_y_pred)
y_pred = tree.map_structure_up_to(y_pred, lambda x: x[1], y_true_y_pred)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = super().get_config()
config.update({"fn": serialization_lib.serialize_keras_object(self.fn)})
config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
return config
@classmethod
def from_config(cls, config):
if "fn" in config:
config = serialization_lib.deserialize_keras_object(config)
return cls(**config)
def __repr__(self):
return f"<LossFunctionWrapper({self.fn}, kwargs={self._fn_kwargs})>"
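# A minimal usage sketch for the wrapper above (illustrative; `my_mae` is a
# hypothetical function, not part of this module):
#
#     def my_mae(y_true, y_pred):
#         return ops.mean(ops.abs(y_pred - y_true), axis=-1)
#
#     loss = LossFunctionWrapper(my_mae, name="my_mae")
#     # `loss(y_true, y_pred, sample_weight=...)` now applies the same dtype
#     # casting, masking and reduction behavior as the built-in losses.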
@keras_export("keras.losses.MeanSquaredError")
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
Formula:
```python
loss = mean(square(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_squared_error",
dtype=None,
):
super().__init__(
mean_squared_error, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
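# Worked example for the formula above (illustrative numbers): with
# y_true = [[0., 1.]] and y_pred = [[1., 1.]], square(y_true - y_pred) is
# [[1., 0.]], the per-sample mean is 0.5, and `MeanSquaredError()` therefore
# returns 0.5 under the default reduction.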
@keras_export("keras.losses.MeanAbsoluteError")
class MeanAbsoluteError(LossFunctionWrapper):
    """Computes the mean absolute difference between labels and predictions.
Formula:
```python
loss = mean(abs(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_absolute_error",
dtype=None,
):
super().__init__(
mean_absolute_error, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` & `y_pred`.
Formula:
```python
loss = 100 * mean(abs((y_true - y_pred) / y_true))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_absolute_percentage_error",
dtype=None,
):
super().__init__(
mean_absolute_percentage_error,
name=name,
reduction=reduction,
dtype=dtype,
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_squared_logarithmic_error",
dtype=None,
):
super().__init__(
mean_squared_logarithmic_error,
name=name,
reduction=reduction,
dtype=dtype,
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.CosineSimilarity")
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` & `y_pred`.
    Note that the result is a number between -1 and 1. A value of 0 indicates
    orthogonality, and values closer to -1 indicate greater similarity. This
    makes it usable as a loss function in a setting where you try to maximize
    the proximity between predictions and targets. If either `y_true` or
    `y_pred` is a zero vector, the cosine similarity will be 0 regardless of
    the proximity between predictions and targets.
Formula:
```python
loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
```
Args:
axis: The axis along which the cosine similarity is computed
(the features axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
axis=-1,
reduction="sum_over_batch_size",
name="cosine_similarity",
dtype=None,
):
super().__init__(
cosine_similarity,
name=name,
reduction=reduction,
dtype=dtype,
axis=axis,
)
def get_config(self):
return Loss.get_config(self)
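# Worked example for the formula above (illustrative numbers): with
# y_true = [[0., 1.]] and y_pred = [[1., 1.]], the L2-normalized vectors are
# [[0., 1.]] and [[0.7071, 0.7071]], so the loss is
# -sum([0., 1.] * [0.7071, 0.7071]) = -0.7071.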
@keras_export("keras.losses.Huber")
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` & `y_pred`.
Formula:
```python
for x in error:
if abs(x) <= delta:
loss.append(0.5 * x^2)
elif abs(x) > delta:
loss.append(delta * abs(x) - 0.5 * delta^2)
loss = mean(loss, axis=-1)
```
See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
delta=1.0,
reduction="sum_over_batch_size",
name="huber_loss",
dtype=None,
):
super().__init__(
huber,
name=name,
reduction=reduction,
dtype=dtype,
delta=delta,
)
def get_config(self):
return Loss.get_config(self)
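# Worked example of the quadratic/linear switch above (illustrative numbers,
# delta=1.0): an error of 0.5 contributes 0.5 * 0.5**2 = 0.125, while an
# error of 2.0 contributes 1.0 * 2.0 - 0.5 * 1.0**2 = 1.5. The two branches
# agree at abs(error) == delta, where both give 0.5.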
@keras_export("keras.losses.LogCosh")
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
Formula:
```python
error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error)) / 2), axis=-1)
```
    where `error` is the prediction error `y_pred - y_true`.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="log_cosh",
dtype=None,
):
super().__init__(log_cosh, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.Hinge")
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(1 - y_true * y_pred, 0)
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided, we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="hinge",
dtype=None,
):
super().__init__(hinge, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.SquaredHinge")
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = square(maximum(1 - y_true * y_pred, 0))
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided, we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self, reduction="sum_over_batch_size", name="squared_hinge", dtype=None
):
super().__init__(
squared_hinge, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.CategoricalHinge")
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(neg - pos + 1, 0)
```
    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="categorical_hinge",
dtype=None,
):
super().__init__(
categorical_hinge, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.KLDivergence")
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_true * log(y_true / y_pred)
```
`y_true` and `y_pred` are expected to be probability
distributions, with values between 0 and 1. They will get
clipped to the `[0, 1]` range.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self, reduction="sum_over_batch_size", name="kl_divergence", dtype=None
):
super().__init__(
kl_divergence, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
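# Worked example for the formula above (illustrative numbers): with
# y_true = [[0.5, 0.5]] and y_pred = [[0.25, 0.75]], the per-element terms
# are 0.5 * log(0.5 / 0.25) = 0.3466 and 0.5 * log(0.5 / 0.75) = -0.2027,
# which sum to roughly 0.1438 for the sample.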
@keras_export("keras.losses.Poisson")
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_pred - y_true * log(y_pred)
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self, reduction="sum_over_batch_size", name="poisson", dtype=None
):
super().__init__(poisson, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.BinaryCrossentropy")
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e., a
      single floating-point value which either represents a
      [logit](https://en.wikipedia.org/wiki/Logit) (i.e., a value in
      [-inf, inf] when `from_logits=True`) or a probability (i.e., a value
      in [0., 1.] when `from_logits=False`).
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` contains probabilities (i.e., values in
            [0, 1]).
label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
When > 0, we compute the loss between the predicted labels
and a smoothed version of the true labels, where the smoothing
squeezes the labels towards 0.5. Larger values of
`label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Examples:
**Recommended Usage:** (set `from_logits=True`)
    With the `compile()` API:
```python
model.compile(
loss=keras.losses.BinaryCrossentropy(from_logits=True),
...
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = np.array([0, 1, 0, 0])
>>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.8654
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = np.array([[0, 1], [0, 0]])
>>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using the default 'sum_over_batch_size' reduction type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.8654
>>> # Using 'sample_weight' attribute
>>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
0.243
    >>> # Using 'sum' reduction type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
... reduction="sum")
>>> bce(y_true, y_pred)
1.730
>>> # Using 'none' reduction type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=None)
>>> bce(y_true, y_pred)
array([0.235, 1.496], dtype=float32)
**Default Usage:** (set `from_logits=False`)
>>> # Make the following updates to the above "Recommended Usage" section
>>> # 1. Set `from_logits=False`
    >>> keras.losses.BinaryCrossentropy() # OR ...(from_logits=False)
>>> # 2. Update `y_pred` to use probabilities instead of logits
>>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
"""
def __init__(
self,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="binary_crossentropy",
dtype=None,
):
super().__init__(
binary_crossentropy,
name=name,
reduction=reduction,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
config = Loss.get_config(self)
config.update(
{
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
)
return config
@keras_export("keras.losses.BinaryFocalCrossentropy")
class BinaryFocalCrossentropy(LossFunctionWrapper):
"""Computes focal cross-entropy loss between true labels and predictions.
Binary cross-entropy loss is often used for binary (0 or 1) classification
tasks. The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e., a
      single floating-point value which either represents a
      [logit](https://en.wikipedia.org/wiki/Logit) (i.e., a value in
      [-inf, inf] when `from_logits=True`) or a probability (i.e., a value
      in `[0., 1.]` when `from_logits=False`).
According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
helps to apply a "focal factor" to down-weight easy examples and focus more
on hard examples. By default, the focal tensor is computed as follows:
`focal_factor = (1 - output) ** gamma` for class 1
`focal_factor = output ** gamma` for class 0
where `gamma` is a focusing parameter. When `gamma=0`, this function is
equivalent to the binary crossentropy loss.
Args:
apply_class_balancing: A bool, whether to apply weight balancing on the
binary classes 0 and 1.
alpha: A weight balancing factor for class 1, default is `0.25` as
mentioned in reference [Lin et al., 2018](
https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
`1.0 - alpha`.
gamma: A focusing parameter used to compute the focal factor, default is
`2.0` as mentioned in the reference
[Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` contains probabilities (i.e., values in
            `[0, 1]`).
label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
When > `0`, we compute the loss between the predicted labels
and a smoothed version of the true labels, where the smoothing
squeezes the labels towards `0.5`.
Larger values of `label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Examples:
With the `compile()` API:
```python
model.compile(
loss=keras.losses.BinaryFocalCrossentropy(
gamma=2.0, from_logits=True),
...
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = np.array([0, 1, 0, 0])
>>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
>>> loss = keras.losses.BinaryFocalCrossentropy(
... gamma=2, from_logits=True)
>>> loss(y_true, y_pred)
0.691
>>> # Apply class weight
>>> loss = keras.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=2, from_logits=True)
>>> loss(y_true, y_pred)
0.51
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = np.array([[0, 1], [0, 0]])
>>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using the default 'sum_over_batch_size' reduction type.
>>> loss = keras.losses.BinaryFocalCrossentropy(
... gamma=3, from_logits=True)
>>> loss(y_true, y_pred)
0.647
>>> # Apply class weight
>>> loss = keras.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=3, from_logits=True)
>>> loss(y_true, y_pred)
0.482
>>> # Using 'sample_weight' attribute with focal effect
>>> loss = keras.losses.BinaryFocalCrossentropy(
... gamma=3, from_logits=True)
>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
0.133
>>> # Apply class weight
>>> loss = keras.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=3, from_logits=True)
>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
0.097
    >>> # Using 'sum' reduction type.
>>> loss = keras.losses.BinaryFocalCrossentropy(
... gamma=4, from_logits=True,
... reduction="sum")
>>> loss(y_true, y_pred)
1.222
>>> # Apply class weight
>>> loss = keras.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=4, from_logits=True,
... reduction="sum")
>>> loss(y_true, y_pred)
0.914
>>> # Using 'none' reduction type.
>>> loss = keras.losses.BinaryFocalCrossentropy(
... gamma=5, from_logits=True,
... reduction=None)
>>> loss(y_true, y_pred)
    array([0.0017, 1.1561], dtype=float32)
>>> # Apply class weight
>>> loss = keras.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=5, from_logits=True,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/losses/loss.py | keras/src/losses/loss.py | from keras.src import backend
from keras.src import dtype_policies
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils.naming import auto_name
@keras_export(["keras.Loss", "keras.losses.Loss"])
class Loss(KerasSaveable):
"""Loss base class.
This is the class to subclass in order to create new custom losses.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to a different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`,
`y_pred`.
Example subclass implementation:
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
return ops.mean(ops.square(y_pred - y_true), axis=-1)
```
"""
def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None):
self.name = name or auto_name(self.__class__.__name__)
self.reduction = standardize_reduction(reduction)
self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
self._dtype = self._dtype_policy.compute_dtype
@property
def dtype(self):
return self._dtype
def __call__(self, y_true, y_pred, sample_weight=None):
in_mask = backend.get_keras_mask(y_pred)
with ops.name_scope(self.name):
y_pred = tree.map_structure(
lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred
)
y_true = tree.map_structure(
lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true
)
losses = self.call(y_true, y_pred)
out_mask = backend.get_keras_mask(losses)
if in_mask is not None and out_mask is not None:
mask = in_mask & out_mask
elif in_mask is not None:
mask = in_mask
elif out_mask is not None:
mask = out_mask
else:
mask = None
return reduce_weighted_values(
losses,
sample_weight=sample_weight,
mask=mask,
reduction=self.reduction,
dtype=self.dtype,
)
def call(self, y_true, y_pred):
raise NotImplementedError
def get_config(self):
return {"name": self.name, "reduction": self.reduction}
@classmethod
def from_config(cls, config):
return cls(**config)
def _obj_type(self):
return "Loss"
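# A minimal subclass-and-call sketch for the base class above (illustrative;
# `MyMeanAbsoluteError` is a hypothetical subclass, not an import):
#
#     class MyMeanAbsoluteError(Loss):
#         def call(self, y_true, y_pred):
#             return ops.mean(ops.abs(y_pred - y_true), axis=-1)
#
#     loss_fn = MyMeanAbsoluteError(reduction="sum_over_batch_size")
#     # `__call__` casts inputs to `loss_fn.dtype`, merges any Keras masks,
#     # applies `sample_weight`, then reduces:
#     value = loss_fn(y_true, y_pred, sample_weight=weights)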
def standardize_reduction(reduction):
allowed = {
"sum_over_batch_size",
"sum",
None,
"none",
"mean",
"mean_with_sample_weight",
}
if reduction not in allowed:
raise ValueError(
"Invalid value for argument `reduction`. "
f"Expected one of {allowed}. Received: "
f"reduction={reduction}"
)
return reduction
def squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1=True):
    """Squeeze or expand the last dim if the two ranks differ by exactly 1."""
x1_rank = len(x1.shape)
x2_rank = len(x2.shape)
if x1_rank == x2_rank:
return x1, x2
if x1_rank == x2_rank + 1:
if x1.shape[-1] == 1:
if x2_rank == 1 and expand_rank_1:
x2 = ops.expand_dims(x2, axis=-1)
else:
x1 = ops.squeeze(x1, axis=-1)
if x2_rank == x1_rank + 1:
if x2.shape[-1] == 1:
if x1_rank == 1 and expand_rank_1:
x1 = ops.expand_dims(x1, axis=-1)
else:
x2 = ops.squeeze(x2, axis=-1)
return x1, x2
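# Shape-behavior sketch for the helper above (illustrative shapes only):
#
#     x1: (8, 1),    x2: (8,)    -> x2 expanded to (8, 1)  (expand_rank_1=True)
#     x1: (8, 3, 1), x2: (8, 3)  -> x1 squeezed to (8, 3)
#     x1: (8, 3),    x2: (8, 3)  -> both returned unchanged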
def reduce_values(values, sample_weight=None, reduction="sum_over_batch_size"):
if (
reduction is None
or reduction == "none"
or tuple(values.shape) == ()
or tuple(values.shape) == (0,)
):
return values
loss = ops.sum(values)
if reduction in ("sum_over_batch_size", "mean", "mean_with_sample_weight"):
if reduction == "mean_with_sample_weight" and sample_weight is not None:
divisor = ops.cast(ops.sum(sample_weight), loss.dtype)
else:
divisor = ops.cast(
ops.prod(
ops.convert_to_tensor(ops.shape(values), dtype="int32")
),
loss.dtype,
)
loss = ops.divide_no_nan(loss, divisor)
loss = scale_loss_for_distribution(loss)
return loss
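# Reduction sketch for the helper above (illustrative values): for
# values = [3., 5.] with no sample weight, "sum" yields 8.0 while
# "sum_over_batch_size" divides by the element count (2) to yield 4.0.
# "mean_with_sample_weight" with sample_weight = [1., 3.] instead divides
# the (already weighted) sum by sum(sample_weight) = 4.0.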
def reduce_weighted_values(
values,
sample_weight=None,
mask=None,
reduction="sum_over_batch_size",
dtype=None,
):
reduction = standardize_reduction(reduction)
values = ops.convert_to_tensor(values, dtype=dtype)
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
if mask is not None:
mask = ops.convert_to_tensor(mask, dtype=dtype)
# Merge mask and sample weight into sample weight.
sample_weight = apply_mask(
sample_weight, mask, dtype=values.dtype, reduction=reduction
)
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, values.dtype)
# Update dimensions of `sample_weight` to match `losses`.
values, sample_weight = squeeze_or_expand_to_same_rank(
values, sample_weight
)
values = values * sample_weight
# Apply reduction function to the individual weighted losses.
loss = reduce_values(values, sample_weight, reduction)
return loss
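# Pipeline sketch for the function above: inputs are converted to tensors,
# any mask is folded into `sample_weight` (see `apply_mask` below), the
# weights are rank-aligned with the losses and multiplied in elementwise,
# and only then is the reduction from `reduce_values` applied.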
def apply_mask(sample_weight, mask, dtype, reduction):
"""Applies any mask on predictions to sample weights."""
if mask is not None:
mask = ops.cast(mask, dtype=dtype)
if reduction in ("mean", "sum_over_batch_size"):
# Valid entries have weight `total/valid`, while invalid ones
# have 0. When summed over batch, they will be reduced to:
#
# mean(loss * sample_weight * total / valid)
# = sum(loss * sample_weight * total / valid) / total
# = sum(loss * sample_weight) / total * total / valid
# = sum(loss * sample_weight) / valid
total = ops.cast(
ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")),
dtype,
)
valid = ops.sum(mask) # May be 0!
mask *= ops.divide_no_nan(total, valid)
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, dtype=dtype)
mask, sample_weight = squeeze_or_expand_to_same_rank(
mask, sample_weight
)
sample_weight *= mask
else:
sample_weight = mask
return sample_weight
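# Worked example of the rescaling above (illustrative numbers): with
# mask = [1., 1., 0., 1.] under "sum_over_batch_size", total = 4 and
# valid = 3, so the mask becomes [4/3, 4/3, 0., 4/3]; dividing the masked
# sum by the full batch size of 4 then equals averaging over the 3 valid
# entries, as the derivation in the function's comment shows.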
def scale_loss_for_distribution(value):
"""Scales the given value by the number of replicas in the strategy.
Currently, this function is only effective when using the tensorflow backend
and `tf.distribute`.
"""
if backend.backend() == "tensorflow":
import tensorflow as tf
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if num_replicas > 1:
value = ops.multiply(
value, ops.cast(1.0 / num_replicas, value.dtype)
)
return value
def unscale_loss_for_distribution(value):
"""Unscales the given value by the number of replicas in the strategy.
Currently, this function is only effective when using the tensorflow backend
and `tf.distribute`.
"""
if backend.backend() == "tensorflow":
import tensorflow as tf
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if num_replicas > 1:
value = ops.multiply(value, ops.cast(num_replicas, value.dtype))
return value
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/losses/__init__.py | keras/src/losses/__init__.py | import inspect
from keras.src.api_export import keras_export
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import Circle
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import LossFunctionWrapper
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import circle
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
# Base
Loss,
LossFunctionWrapper,
# Probabilistic
KLDivergence,
Poisson,
BinaryCrossentropy,
BinaryFocalCrossentropy,
CategoricalCrossentropy,
CategoricalFocalCrossentropy,
SparseCategoricalCrossentropy,
# Regression
MeanSquaredError,
MeanAbsoluteError,
MeanAbsolutePercentageError,
MeanSquaredLogarithmicError,
CosineSimilarity,
LogCosh,
Huber,
# Hinge
Hinge,
SquaredHinge,
CategoricalHinge,
# Image segmentation
Dice,
Tversky,
# Similarity
Circle,
# Sequence
CTC,
# Probabilistic
kl_divergence,
poisson,
binary_crossentropy,
binary_focal_crossentropy,
categorical_crossentropy,
categorical_focal_crossentropy,
sparse_categorical_crossentropy,
# Regression
mean_squared_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_logarithmic_error,
cosine_similarity,
log_cosh,
huber,
# Hinge
hinge,
squared_hinge,
categorical_hinge,
# Image segmentation
dice,
tversky,
# Similarity
circle,
# Sequence
ctc,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{
"bce": binary_crossentropy,
"BCE": binary_crossentropy,
"kld": kl_divergence,
"KLD": kl_divergence,
"mae": mean_absolute_error,
"MAE": mean_absolute_error,
"mse": mean_squared_error,
"MSE": mean_squared_error,
"mape": mean_absolute_percentage_error,
"MAPE": mean_absolute_percentage_error,
"msle": mean_squared_logarithmic_error,
"MSLE": mean_squared_logarithmic_error,
}
)
@keras_export("keras.losses.serialize")
def serialize(loss):
"""Serializes loss function or `Loss` instance.
Args:
loss: A Keras `Loss` instance or a loss function.
Returns:
Loss configuration dictionary.
"""
return serialization_lib.serialize_keras_object(loss)
@keras_export("keras.losses.deserialize")
def deserialize(name, custom_objects=None):
"""Deserializes a serialized loss class/function instance.
Args:
name: Loss configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `Loss` instance or a loss function.
"""
return serialization_lib.deserialize_keras_object(
name,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.losses.get")
def get(identifier):
"""Retrieves a Keras loss as a `function`/`Loss` class instance.
The `identifier` may be the string name of a loss function or `Loss` class.
>>> loss = losses.get("categorical_crossentropy")
>>> type(loss)
<class 'function'>
>>> loss = losses.get("CategoricalCrossentropy")
>>> type(loss)
<class '...CategoricalCrossentropy'>
    You can also specify the `config` of the loss by passing a dict
    containing `class_name` and `config` as the identifier. Note that the
    `class_name` must map to a `Loss` class.
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> loss = losses.get(identifier)
>>> type(loss)
<class '...CategoricalCrossentropy'>
Args:
        identifier: A loss identifier. One of: `None`, the string name of a
            loss function or class, a loss configuration dictionary, a loss
            function, or a loss class instance.
Returns:
A Keras loss as a `function`/ `Loss` class instance.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(f"Could not interpret loss identifier: {identifier}")
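# Alias-resolution sketch (grounded in `ALL_OBJECTS_DICT` above): string
# identifiers are looked up in the dict, so `get("mse")` and `get("MSE")`
# both return the `mean_squared_error` function, while
# `get("MeanSquaredError")` resolves to the class and is instantiated
# before being returned.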
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/losses/losses_test.py | keras/src/losses/losses_test.py | import re
import numpy as np
import pytest
from keras.src import backend
from keras.src import testing
from keras.src.losses import losses
class MeanSquaredErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(losses.MeanSquaredError(name="mymse"))
def test_base_function_reduction(self):
mse_fn = losses.mean_squared_error
y_true = np.array([4, 8, 12])
y_pred = np.array([[3], [0], [1]])
loss = mse_fn(y_true, y_pred)
self.assertEqual(backend.shape(loss), (3,))
def test_all_correct_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 49.5)
def test_scalar_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 113.85)
def test_sample_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 767.8 / 6)
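        # Derivation of the expected value above: the per-sample MSEs are
        # 110/3 and 187/3; weighting by [1.2, 3.4] gives
        # (110 * 1.2 + 187 * 3.4) / 3 = 767.8 / 3, and "sum_over_batch_size"
        # then divides by the 2 samples, yielding 767.8 / 6.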
def test_timestep_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mse_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 97.833336)
def test_zero_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0)
def test_no_reduction(self):
mse_obj = losses.MeanSquaredError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [84.3333, 143.3666])
def test_sum_reduction(self):
mse_obj = losses.MeanSquaredError(reduction="sum")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 227.69998)
def test_mean_with_sample_weight_reduction(self):
mse_obj = losses.MeanSquaredError(reduction="mean_with_sample_weight")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(
loss, (110 / 3 * 1.2 + 187 / 3 * 3.4) / (1.2 + 3.4)
)
def test_dtype_arg(self):
mse_obj = losses.MeanSquaredError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class MeanAbsoluteErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanAbsoluteError(name="myname")
)
def test_all_correct_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 5.5)
def test_scalar_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 12.65)
def test_sample_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 81.4 / 6)
def test_timestep_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mae_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 13.833333)
def test_zero_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0)
def test_no_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [10.7333, 14.5666])
def test_sum_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction="sum")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 25.29999)
def test_mean_with_sample_weight_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction="mean_with_sample_weight")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(
loss, (14 / 3 * 1.2 + 19 / 3 * 3.4) / (1.2 + 3.4)
)
def test_dtype_arg(self):
mae_obj = losses.MeanAbsoluteError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class MeanAbsolutePercentageErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanAbsolutePercentageError(name="mymape")
)
def test_all_correct_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 487.259, 3)
def test_sample_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 422.8888, 3)
def test_timestep_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mape_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 694.4444)
def test_zero_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_no_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [621.8518, 352.6666])
def test_mean_with_sample_weight_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction="mean_with_sample_weight"
)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 183.865)
def test_dtype_arg(self):
mape_obj = losses.MeanAbsolutePercentageError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class MeanSquaredLogarithmicErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanSquaredLogarithmicError(name="mysloge")
)
def test_unweighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 3.3051, 3)
def test_sample_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = msle_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 2.647374)
def test_zero_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_mean_with_sample_weight_reduction(self):
msle_obj = losses.MeanSquaredLogarithmicError(
reduction="mean_with_sample_weight"
)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.646)
def test_dtype_arg(self):
msle_obj = losses.MeanSquaredLogarithmicError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertDType(loss, "bfloat16")
class HingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.3, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.6, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.1, 1.5])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.Hinge(reduction="abc")
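    # A worked sketch of the values above (illustrative, not part of the
    # original suite): `Hinge` maps 0/1 targets to -1/1, so the per-element
    # losses max(1 - y_true * y_pred, 0) are [1.6, 0.6] for row one and
    # [1.4, 1.6] for row two, giving per-sample means of 1.1 and 1.5.
    # "sum_over_batch_size" averages those to 1.3 and "sum" adds them to 2.6.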
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.55, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.1, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.1, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.Hinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
def test_dtype_arg(self):
hinge_obj = losses.Hinge(dtype="bfloat16")
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
loss = hinge_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class SquaredHingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.SquaredHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.86, 3)
# Reduction = "sum"
hinge_obj = losses.SquaredHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 3.72, 3)
# Reduction = None
hinge_obj = losses.SquaredHinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.46, 2.26])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.SquaredHinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.SquaredHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.73, 3)
# Reduction = "sum"
hinge_obj = losses.SquaredHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.46, 3)
# Reduction = None
hinge_obj = losses.SquaredHinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.46, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.SquaredHinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
def test_dtype_arg(self):
hinge_obj = losses.SquaredHinge(dtype="bfloat16")
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
loss = hinge_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class CategoricalHingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.4, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.8, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.2, 1.6])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.CategoricalHinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.6, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.2, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.2, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.CategoricalHinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
def test_dtype_arg(self):
hinge_obj = losses.CategoricalHinge(dtype="bfloat16")
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
loss = hinge_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
class CosineSimilarityTest(testing.TestCase):
    def l2_norm(self, x, axis):
        # Reference L2-normalization along `axis`; the epsilon guards
        # against division by zero for all-zero vectors.
        epsilon = 1e-12
        square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
        x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
        return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = self.np_y_true
self.y_pred = self.np_y_pred
def test_config(self):
cosine_obj = losses.CosineSimilarity(
axis=2, reduction="sum", name="cosine_loss"
)
self.assertEqual(cosine_obj.name, "cosine_loss")
self.assertEqual(cosine_obj.reduction, "sum")
config = cosine_obj.get_config()
self.assertEqual(config, {"name": "cosine_loss", "reduction": "sum"})
def test_unweighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = 2.3
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_sample_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
np_y_true = self.np_y_true.reshape((2, 3, 1))
np_y_pred = self.np_y_pred.reshape((2, 3, 1))
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))
y_true = self.l2_norm(np_y_true, 2)
y_pred = self.l2_norm(np_y_pred, 2)
expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))
y_true = np_y_true
y_pred = np_y_pred
loss = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = losses.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_dtype_arg(self):
self.setup()
cosine_obj = losses.CosineSimilarity(dtype="bfloat16")
loss = cosine_obj(self.y_true, self.y_pred)
self.assertDType(loss, "bfloat16")
class HuberLossTest(testing.TestCase):
    def huber_loss(self, y_true, y_pred, delta=1.0):
        # Reference Huber loss: 0.5 * e**2 where |e| <= delta, and
        # delta * (|e| - 0.5 * delta) otherwise. Writing it as
        # 0.5 * quadratic**2 + delta * linear, with
        # quadratic = min(|e|, delta), covers both branches.
        error = y_pred - y_true
        abs_error = np.abs(error)
        quadratic = np.minimum(abs_error, delta)
        linear = np.subtract(abs_error, quadratic)
        return np.add(
            np.multiply(0.5, np.multiply(quadratic, quadratic)),
            np.multiply(delta, linear),
        )
def setup(self, delta=1.0):
self.np_y_pred = np.array([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])
self.np_y_true = np.array([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
self.batch_size = 6
self.expected_losses = self.huber_loss(
self.np_y_true, self.np_y_pred, delta
)
self.y_pred = self.np_y_pred
self.y_true = self.np_y_true
def test_config(self):
h_obj = losses.Huber(reduction="sum", name="huber")
self.assertEqual(h_obj.name, "huber")
self.assertEqual(h_obj.reduction, "sum")
config = h_obj.get_config()
self.assertEqual(config, {"name": "huber", "reduction": "sum"})
def test_all_correct(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = np.array([[1.2], [3.4]])
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_timestep_weighted(self):
self.setup()
h_obj = losses.Huber()
y_pred = self.np_y_pred.reshape((2, 3, 1))
y_true = self.np_y_true.reshape((2, 3, 1))
expected_losses = self.huber_loss(y_true, y_pred)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = h_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
actual_loss = np.multiply(expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_zero_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 0
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.0, 3)
def test_non_default_delta(self):
self.setup(delta=0.8)
h_obj = losses.Huber(delta=0.8)
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, actual_loss, 3)
def test_dtype_arg(self):
self.setup()
h_obj = losses.Huber(dtype="bfloat16")
loss = h_obj(self.y_true, self.y_pred)
self.assertDType(loss, "bfloat16")
class LogCoshTest(testing.TestCase):
def setup(self):
y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
self.batch_size = 6
        error = y_pred - y_true
        # log(cosh(error)), expanded as log((exp(e) + exp(-e)) / 2).
        self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_true = y_true
self.y_pred = y_pred
def test_config(self):
logcosh_obj = losses.LogCosh(reduction="sum", name="logcosh_loss")
self.assertEqual(logcosh_obj.name, "logcosh_loss")
self.assertEqual(logcosh_obj.reduction, "sum")
config = logcosh_obj.get_config()
self.assertEqual(config, {"name": "logcosh_loss", "reduction": "sum"})
def test_unweighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = np.asarray([1.2, 3.4])
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, 0.0, 3)
def test_dtype_arg(self):
self.setup()
logcosh_obj = losses.LogCosh(dtype="bfloat16")
loss = logcosh_obj(self.y_true, self.y_pred)
self.assertDType(loss, "bfloat16")
class KLDivergenceTest(testing.TestCase):
def setup(self):
self.y_pred = np.asarray(
[0.4, 0.9, 0.12, 0.36, 0.3, 0.4], dtype=np.float32
).reshape((2, 3))
self.y_true = np.asarray(
[0.5, 0.8, 0.12, 0.7, 0.43, 0.8], dtype=np.float32
).reshape((2, 3))
self.batch_size = 2
        # Elementwise KL divergence: y_true * log(y_true / y_pred).
        self.expected_losses = np.multiply(
            self.y_true, np.log(self.y_true / self.y_pred)
        )
def test_config(self):
k_obj = losses.KLDivergence(reduction="sum", name="kld")
self.assertEqual(k_obj.name, "kld")
self.assertEqual(k_obj.reduction, "sum")
def test_unweighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = 2.3
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = np.asarray([1.2, 3.4], dtype=np.float32).reshape((2, 1))
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray(
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/losses/loss_test.py | keras/src/losses/loss_test.py | import pickle
import numpy as np
import pytest
from keras.src import backend
from keras.src import dtype_policies
from keras.src import losses as losses_module
from keras.src import ops
from keras.src import testing
from keras.src.losses.loss import Loss
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
class ExampleLoss(Loss):
def call(self, y_true, y_pred):
return (y_true - y_pred) ** 2
class LossTest(testing.TestCase):
def setUp(self):
self._global_dtype_policy = dtype_policies.dtype_policy.dtype_policy()
self._floatx = backend.floatx()
return super().setUp()
def tearDown(self):
dtype_policies.dtype_policy.set_dtype_policy(self._global_dtype_policy)
backend.set_floatx(self._floatx)
return super().tearDown()
def test_squeeze_or_expand(self):
x1 = ops.ones((3,))
x2 = ops.ones((3, 1))
x1, x2 = squeeze_or_expand_to_same_rank(x1, x2)
self.assertEqual(ops.shape(x1), (3, 1))
self.assertEqual(ops.shape(x2), (3, 1))
x1 = ops.ones((3, 2))
x2 = ops.ones((3, 2, 1))
x1, x2 = squeeze_or_expand_to_same_rank(x1, x2)
self.assertEqual(ops.shape(x1), (3, 2))
self.assertEqual(ops.shape(x2), (3, 2))
x1 = ops.ones((3,))
x2 = ops.ones((3, 1))
x2, x1 = squeeze_or_expand_to_same_rank(x2, x1)
self.assertEqual(ops.shape(x1), (3, 1))
self.assertEqual(ops.shape(x2), (3, 1))
x1 = ops.ones((3, 2))
x2 = ops.ones((3, 2, 1))
x2, x1 = squeeze_or_expand_to_same_rank(x2, x1)
self.assertEqual(ops.shape(x1), (3, 2))
self.assertEqual(ops.shape(x2), (3, 2))
def test_reduction(self):
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
# No reduction
loss_fn = ExampleLoss(reduction=None)
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose((y_true - y_pred) ** 2, loss)
# sum
loss_fn = ExampleLoss(reduction="sum")
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(np.sum((y_true - y_pred) ** 2), loss)
# sum_over_batch_size or mean
loss_fn = ExampleLoss(reduction="sum_over_batch_size")
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(np.sum((y_true - y_pred) ** 2) / 4, loss)
# bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
ExampleLoss(reduction="abc")
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask(self):
mask = np.array([True, False, True, True])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum((masked_y_true - masked_y_pred) ** 2) / 3, loss
)
# Test edge case where everything is masked.
mask = np.array([False, False, False, False])
y_pred._keras_mask = mask
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(loss, 0) # No NaN.
def test_sample_weight(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(sample_weight * (y_true - y_pred) ** 2) / 4, loss
)
# Test edge case where every weight is 0.
sample_weight = np.array([0.0, 0.0, 0.0, 0.0])
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(loss, 0) # No NaN.
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask_and_sample_weight(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
masked_sample_weight = np.array([0.4, 0.2, 0.1])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(masked_sample_weight * (masked_y_true - masked_y_pred) ** 2)
/ 3,
loss,
)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask_and_sample_weight_rank2(self):
# check loss of inputs with duplicate rows doesn't change
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
rank1_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
# duplicate rows
mask = ops.tile(ops.expand_dims(mask, axis=0), (2, 1))
y_true = ops.tile(ops.expand_dims(y_true, axis=0), (2, 1))
y_pred = ops.tile(ops.expand_dims(y_pred, axis=0), (2, 1))
sample_weight = ops.tile(ops.expand_dims(sample_weight, axis=0), (2, 1))
y_pred._keras_mask = mask
rank2_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(rank1_loss, rank2_loss)
# @testing.parametrize(
# "uprank", ["mask", "sample_weight", "y_true", "y_pred"])
# TODO: use parameterization decorator
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_rank_adjustment(self):
for uprank in ["mask", "sample_weight", "ys"]:
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
if uprank == "mask":
mask = np.expand_dims(mask, -1)
elif uprank == "sample_weight":
sample_weight = np.expand_dims(sample_weight, -1)
elif uprank == "ys":
y_true = np.expand_dims(y_true, -1)
y_pred = np.expand_dims(y_pred, -1)
masked_sample_weight = np.array([0.4, 0.2, 0.1])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(
masked_sample_weight * (masked_y_true - masked_y_pred) ** 2
)
/ 3,
loss,
)
def test_mixed_dtypes(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1], dtype="float64")
y_true = np.array([1.0, 0.0, 1.0, 0.0], dtype="int32")
y_pred = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(sample_weight * (y_true - y_pred) ** 2) / 4,
loss,
)
def test_pickle(self):
loss = losses_module.get("mse")
loss = pickle.loads(pickle.dumps(loss))
self.assertEqual(loss, losses_module.mean_squared_error)
def test_get_method(self):
loss = losses_module.get("mse")
self.assertEqual(loss, losses_module.mean_squared_error)
loss = losses_module.get(None)
self.assertEqual(loss, None)
with self.assertRaises(ValueError):
losses_module.get("typo")
def test_dtype_arg(self):
y_true = np.array([1.0, 0.0, 1.0, 0.0], dtype="float32")
y_pred = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")
# Note: we use float16 and not float64 to test this because
# JAX will map float64 to float32.
loss_fn = ExampleLoss(dtype="float16")
loss = loss_fn(y_true, y_pred)
self.assertDType(loss, "float16")
# Test DTypePolicy for `dtype` argument
loss_fn = ExampleLoss(dtype=dtype_policies.DTypePolicy("mixed_float16"))
loss = loss_fn(y_true, y_pred)
self.assertDType(loss, "float16")
# `dtype` setter should raise AttributeError
with self.assertRaises(AttributeError):
loss_fn.dtype = "bfloat16"
def test_default_dtype(self):
y_true = np.array([1.0, 0.0, 1.0, 0.0], dtype="float32")
y_pred = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")
# Defaults to `keras.config.floatx()` not global `dtype_policy`
dtype_policies.dtype_policy.set_dtype_policy("mixed_float16")
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred)
self.assertDType(loss, "float32")
backend.set_floatx("float16")
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred)
self.assertDType(loss, backend.floatx())
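# A minimal usage sketch of the subclassing pattern exercised above
# (illustrative only, kept as a comment so the test module stays importable):
#
#     loss_fn = ExampleLoss(reduction="sum")
#     loss = loss_fn(np.array([1.0, 0.0]), np.array([0.9, 0.1]))
#     # equivalent to np.sum((y_true - y_pred) ** 2) for these arrays, 0.02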
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/draw_bounding_boxes.py | keras/src/visualization/draw_bounding_boxes.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
try:
import cv2
except ImportError:
cv2 = None
@keras_export("keras.visualization.draw_bounding_boxes")
def draw_bounding_boxes(
images,
bounding_boxes,
bounding_box_format,
class_mapping=None,
color=(128, 128, 128),
line_thickness=2,
text_thickness=1,
font_scale=1.0,
data_format=None,
):
"""Draws bounding boxes on images.
This function draws bounding boxes on a batch of images. It supports
different bounding box formats and can optionally display class labels
and confidences.
Args:
images: A batch of images as a 4D tensor or NumPy array. Shape should be
`(batch_size, height, width, channels)`.
bounding_boxes: A dictionary containing bounding box data. Should have
the following keys:
- `boxes`: A tensor or array of shape `(batch_size, num_boxes, 4)`
containing the bounding box coordinates in the specified format.
- `labels`: A tensor or array of shape `(batch_size, num_boxes)`
containing the class labels for each bounding box.
- `confidences` (Optional): A tensor or array of shape
`(batch_size, num_boxes)` containing the confidence scores for
each bounding box.
bounding_box_format: A string specifying the format of the bounding
boxes. Refer [keras-io](TODO)
class_mapping: A dictionary mapping class IDs (integers) to class labels
(strings). Used to display class labels next to the bounding boxes.
Defaults to None (no labels displayed).
color: A tuple or list representing the RGB color of the bounding boxes.
For example, `(255, 0, 0)` for red. Defaults to `(128, 128, 128)`.
line_thickness: An integer specifying the thickness of the bounding box
lines. Defaults to `2`.
text_thickness: An integer specifying the thickness of the text labels.
Defaults to `1`.
font_scale: A float specifying the scale of the font used for text
labels. Defaults to `1.0`.
data_format: A string, either `"channels_last"` or `"channels_first"`,
specifying the order of dimensions in the input images. Defaults to
the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
"channels_last".
Returns:
A NumPy array of the annotated images with the bounding boxes drawn.
The array will have the same shape as the input `images`.
Raises:
ValueError: If `images` is not a 4D tensor/array, if `bounding_boxes` is
not a dictionary, or if `bounding_boxes` does not contain `"boxes"`
and `"labels"` keys.
TypeError: If `bounding_boxes` is not a dictionary.
ImportError: If `cv2` (OpenCV) is not installed.
"""
    if cv2 is None:
        raise ImportError(
            "The `draw_bounding_boxes` function requires the `cv2` package "
            "(OpenCV). Please install it with `pip install opencv-python`."
        )
class_mapping = class_mapping or {}
text_thickness = (
text_thickness or line_thickness
) # Default text_thickness if not provided.
data_format = data_format or backend.image_data_format()
images_shape = ops.shape(images)
if len(images_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={images_shape}"
)
if not isinstance(bounding_boxes, dict):
raise TypeError(
"`bounding_boxes` should be a dict. "
f"Received: bounding_boxes={bounding_boxes} of type "
f"{type(bounding_boxes)}"
)
if "boxes" not in bounding_boxes or "labels" not in bounding_boxes:
raise ValueError(
"`bounding_boxes` should be a dict containing 'boxes' and "
f"'labels' keys. Received: bounding_boxes={bounding_boxes}"
)
if data_format == "channels_last":
h_axis = -3
w_axis = -2
else:
h_axis = -2
w_axis = -1
height = images_shape[h_axis]
width = images_shape[w_axis]
bounding_boxes = bounding_boxes.copy()
bounding_boxes = convert_format(
bounding_boxes, bounding_box_format, "xyxy", height, width
)
# To numpy array
images = ops.convert_to_numpy(images).astype("uint8")
boxes = ops.convert_to_numpy(bounding_boxes["boxes"])
labels = ops.convert_to_numpy(bounding_boxes["labels"])
if "confidences" in bounding_boxes:
confidences = ops.convert_to_numpy(bounding_boxes["confidences"])
else:
confidences = None
result = []
batch_size = images.shape[0]
for i in range(batch_size):
_image = images[i]
_box = boxes[i]
_class = labels[i]
for box_i in range(_box.shape[0]):
x1, y1, x2, y2 = _box[box_i].astype("int32")
c = _class[box_i].astype("int32")
if c == -1:
continue
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
c = int(c)
# Draw bounding box
cv2.rectangle(_image, (x1, y1), (x2, y2), color, line_thickness)
if c in class_mapping:
label = class_mapping[c]
if confidences is not None:
conf = confidences[i][box_i]
label = f"{label} | {conf:.2f}"
font_x1, font_y1 = _find_text_location(
x1, y1, font_scale, text_thickness
)
cv2.putText(
img=_image,
text=label,
org=(font_x1, font_y1),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=color,
thickness=text_thickness,
)
result.append(_image)
return np.stack(result, axis=0)
def _find_text_location(x, y, font_scale, thickness):
    """Finds a safe location to draw the label text for a box at (x, y)."""
    font_height = int(font_scale * 12)
    target_y = y - 8
    # Place the label above the box when there is room for it.
    if target_y - (2 * font_height) > 0:
        return x, y - 8
    # Otherwise fall back to drawing it inside, just below the top edge.
    line_offset = thickness
    static_offset = 3
    return (
        x + static_offset,
        y + (2 * font_height) + line_offset + static_offset,
    )
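# A minimal usage sketch (illustrative; the image, boxes, and class mapping
# below are made-up values, kept as a comment because it needs the optional
# `cv2` dependency at call time):
#
#     import numpy as np
#     images = np.zeros((1, 64, 64, 3), dtype="uint8")
#     bounding_boxes = {
#         "boxes": np.array([[[4.0, 4.0, 30.0, 30.0]]]),
#         "labels": np.array([[0]]),
#     }
#     annotated = draw_bounding_boxes(
#         images,
#         bounding_boxes,
#         bounding_box_format="xyxy",
#         class_mapping={0: "cat"},
#     )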
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/plot_segmentation_mask_gallery.py | keras/src/visualization/plot_segmentation_mask_gallery.py | import functools
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.visualization.draw_segmentation_masks import (
draw_segmentation_masks,
)
from keras.src.visualization.plot_image_gallery import plot_image_gallery
@keras_export("keras.visualization.plot_segmentation_mask_gallery")
def plot_segmentation_mask_gallery(
images,
num_classes,
value_range=(0, 255),
y_true=None,
y_pred=None,
color_mapping=None,
blend=True,
alpha=0.8,
ignore_index=-1,
data_format=None,
**kwargs,
):
"""Plots a gallery of images with corresponding segmentation masks.
Args:
images: A 4D tensor or NumPy array of images. Shape should be
`(batch_size, height, width, channels)`.
num_classes: The number of segmentation classes. Class indices should
start from `1`. Class `0` will be treated as background and
ignored if `ignore_index` is not 0.
value_range: A tuple specifying the value range of the images
(e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`.
y_true: A 3D/4D tensor or NumPy array representing the ground truth
segmentation masks. Shape should be `(batch_size, height, width)` or
`(batch_size, height, width, 1)`. Defaults to `None`.
y_pred: A 3D/4D tensor or NumPy array representing the predicted
segmentation masks. Shape should be the same as `y_true`.
Defaults to `None`.
color_mapping: A dictionary mapping class indices to RGB colors.
If `None`, a default color palette is used. Class indices start
from `1`. Defaults to `None`.
blend: Whether to blend the masks with the input image using the
`alpha` value. If `False`, the masks are drawn directly on the
images without blending. Defaults to `True`.
alpha: The opacity of the segmentation masks (a float between 0 and 1).
Defaults to `0.8`.
ignore_index: The class index to ignore when drawing masks.
Defaults to `-1`.
data_format: The image data format `"channels_last"` or
`"channels_first"`. Defaults to the Keras backend data format.
kwargs: Additional keyword arguments to be passed to
`keras.visualization.plot_image_gallery`.
Returns:
The output of `keras.visualization.plot_image_gallery`.
Raises:
ValueError: If `images` is not a 4D tensor/array.
"""
data_format = data_format or backend.image_data_format()
image_shape = ops.shape(images)
if len(image_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={image_shape}"
)
if data_format == "channels_first":
images = ops.transpose(images, (0, 2, 3, 1))
batch_size = image_shape[0] if len(image_shape) == 4 else 1
rows = batch_size
cols = 1
if y_true is not None:
cols += 1
if y_pred is not None:
cols += 1
images_np = ops.convert_to_numpy(images)
draw_masks_fn = functools.partial(
draw_segmentation_masks,
num_classes=num_classes,
color_mapping=color_mapping,
alpha=alpha,
ignore_index=ignore_index,
blend=blend,
)
if y_true is not None:
if data_format == "channels_first":
y_true = ops.transpose(y_true, (0, 2, 3, 1))
y_true = ops.cast(y_true, "int32")
true_masks_drawn = draw_masks_fn(images_np, y_true)
if y_pred is not None:
if data_format == "channels_first":
y_pred = ops.transpose(y_pred, (0, 2, 3, 1))
y_pred = ops.cast(y_pred, "int32")
predicted_masks_drawn = draw_masks_fn(images_np, y_pred)
images_with_masks = []
for i in range(batch_size):
images_with_masks.append(images_np[i])
if y_true is not None:
images_with_masks.append(true_masks_drawn[i])
if y_pred is not None:
images_with_masks.append(predicted_masks_drawn[i])
gallery_images = np.stack(images_with_masks, axis=0)
return plot_image_gallery(
gallery_images, value_range=value_range, rows=rows, cols=cols, **kwargs
)
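# A minimal usage sketch (illustrative; the images and masks are random data):
#
#     import numpy as np
#     images = np.random.uniform(0, 255, (2, 32, 32, 3))
#     masks = np.random.randint(0, 2, (2, 32, 32, 1))
#     plot_segmentation_mask_gallery(images, num_classes=2, y_true=masks)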
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/plot_image_gallery.py | keras/src/visualization/plot_image_gallery.py | import math
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def _extract_image_batch(images, num_images, batch_size):
"""Extracts a batch of images for plotting.
Args:
images: The 4D tensor or NumPy array of images.
num_images: The number of images to extract.
batch_size: The original batch size of the images.
Returns:
A 4D tensor or NumPy array containing the extracted images.
Raises:
ValueError: If `images` is not a 4D tensor/array.
"""
    if len(ops.shape(images)) != 4:
        raise ValueError(
            "`plot_image_gallery()` requires you to "
            "batch your `np.array` samples together."
        )
num_samples = min(num_images, batch_size)
sample = images[:num_samples, ...]
return sample
@keras_export("keras.visualization.plot_image_gallery")
def plot_image_gallery(
images,
y_true=None,
y_pred=None,
label_map=None,
rows=None,
cols=None,
value_range=(0, 255),
scale=2,
path=None,
show=None,
transparent=True,
dpi=60,
legend_handles=None,
data_format=None,
):
"""Displays a gallery of images with optional labels and predictions.
Args:
images: A 4D tensor or NumPy array of images. Shape should be
`(batch_size, height, width, channels)`.
y_true: A 1D tensor or NumPy array of true labels (class indices).
Defaults to `None`.
y_pred: A 1D tensor or NumPy array of predicted labels (class indices).
Defaults to `None`.
label_map: A dictionary mapping class indices to class names.
Required if `y_true` or `y_pred` are provided.
Defaults to `None`.
value_range: A tuple specifying the value range of the images
(e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`.
rows: The number of rows in the gallery. If `None`, it's calculated
based on the number of images and `cols`. Defaults to `None`.
cols: The number of columns in the gallery. If `None`, it's calculated
based on the number of images and `rows`. Defaults to `None`.
scale: A float controlling the size of the displayed images. The images
are scaled by this factor. Defaults to `2`.
path: The path to save the generated gallery image. If `None`, the
image is displayed using `plt.show()`. Defaults to `None`.
show: Whether to display the image using `plt.show()`. If `True`, the
image is displayed. If `False`, the image is not displayed.
Ignored if `path` is not `None`. Defaults to `True` if `path`
is `None`, `False` otherwise.
transparent: A boolean, whether to save the figure with a transparent
background. Defaults to `True`.
dpi: The DPI (dots per inch) for saving the figure. Defaults to 60.
legend_handles: A list of matplotlib `Patch` objects to use as legend
handles. Defaults to `None`.
data_format: The image data format `"channels_last"` or
`"channels_first"`. Defaults to the Keras backend data format.
Raises:
ValueError: If both `path` and `show` are set to non-`None` values,
if `images` is not a 4D tensor or array, or if `y_true` or `y_pred`
are provided without a `label_map`.
        ImportError: If matplotlib is not installed.
"""
if plt is None:
raise ImportError(
"The `plot_image_gallery` function requires the `matplotlib` "
"package. Please install it with `pip install matplotlib`."
)
    if path is not None and show:
        raise ValueError(
            "plot_image_gallery() expects either `path` to be set or "
            "`show` to be true, not both."
        )
if (y_true is not None or y_pred is not None) and label_map is None:
raise ValueError(
"If `y_true` or `y_pred` are provided, a `label_map` must also be"
" provided."
)
show = show if show is not None else (path is None)
data_format = data_format or backend.image_data_format()
batch_size = ops.shape(images)[0] if len(ops.shape(images)) == 4 else 1
rows = rows or int(math.ceil(math.sqrt(batch_size)))
cols = cols or int(math.ceil(batch_size // rows))
num_images = rows * cols
images = _extract_image_batch(images, num_images, batch_size)
if (
data_format == "channels_first"
): # Ensure correct data format for plotting
images = ops.transpose(images, (0, 2, 3, 1))
# Generate subplots
fig, axes = plt.subplots(
nrows=rows,
ncols=cols,
figsize=(cols * scale, rows * scale),
frameon=False,
layout="tight",
squeeze=True,
sharex="row",
sharey="col",
)
fig.subplots_adjust(wspace=0, hspace=0)
if isinstance(axes, np.ndarray) and len(axes.shape) == 1:
expand_axis = 0 if rows == 1 else -1
axes = np.expand_dims(axes, expand_axis)
if legend_handles is not None:
fig.legend(handles=legend_handles, loc="lower center")
images = BaseImagePreprocessingLayer()._transform_value_range(
images=images, original_range=value_range, target_range=(0, 255)
)
images = ops.convert_to_numpy(images)
if y_true is not None:
y_true = ops.convert_to_numpy(y_true)
if y_pred is not None:
y_pred = ops.convert_to_numpy(y_pred)
for row in range(rows):
for col in range(cols):
index = row * cols + col
current_axis = (
axes[row, col] if isinstance(axes, np.ndarray) else axes
)
current_axis.imshow(images[index].astype("uint8"))
current_axis.margins(x=0, y=0)
current_axis.axis("off")
title_parts = []
if y_true is not None and index < len(y_true):
title_parts.append(
f"Label: {label_map.get(y_true[index], 'Unknown')}"
)
if y_pred is not None and index < len(y_pred):
title_parts.append(
f"Pred: {label_map.get(y_pred[index], 'Unknown')}"
)
if title_parts:
current_axis.set_title(" ".join(title_parts), fontsize=8)
if path is not None:
plt.savefig(
fname=path,
pad_inches=0,
bbox_inches="tight",
transparent=transparent,
dpi=dpi,
)
plt.close()
elif show:
plt.show()
plt.close()
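# A minimal usage sketch (illustrative; the images are random data):
#
#     import numpy as np
#     images = np.random.uniform(0, 255, (4, 32, 32, 3))
#     plot_image_gallery(images, rows=2, cols=2, value_range=(0, 255))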
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/plot_bounding_box_gallery.py | keras/src/visualization/plot_bounding_box_gallery.py | import functools
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.visualization.draw_bounding_boxes import draw_bounding_boxes
from keras.src.visualization.plot_image_gallery import plot_image_gallery
try:
from matplotlib import patches # For legend patches
except ImportError:
patches = None
@keras_export("keras.visualization.plot_bounding_box_gallery")
def plot_bounding_box_gallery(
images,
bounding_box_format,
y_true=None,
y_pred=None,
value_range=(0, 255),
true_color=(0, 188, 212),
pred_color=(255, 235, 59),
line_thickness=2,
font_scale=1.0,
text_thickness=None,
class_mapping=None,
ground_truth_mapping=None,
prediction_mapping=None,
legend=False,
legend_handles=None,
rows=None,
cols=None,
data_format=None,
**kwargs,
):
"""Plots a gallery of images with bounding boxes.
This function can display both ground truth and predicted bounding boxes on
a set of images. It supports various bounding box formats and can include
class labels and a legend.
Args:
images: A 4D tensor or NumPy array of images. Shape should be
`(batch_size, height, width, channels)`.
bounding_box_format: The format of the bounding boxes.
Refer [keras-io](TODO)
y_true: A dictionary containing the ground truth bounding boxes and
labels. Should have the same structure as the `bounding_boxes`
argument in `keras.visualization.draw_bounding_boxes`.
Defaults to `None`.
y_pred: A dictionary containing the predicted bounding boxes and labels.
Should have the same structure as `y_true`. Defaults to `None`.
value_range: A tuple specifying the value range of the images
(e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`.
true_color: A tuple of three integers representing the RGB color for the
ground truth bounding boxes. Defaults to `(0, 188, 212)`.
pred_color: A tuple of three integers representing the RGB color for the
predicted bounding boxes. Defaults to `(255, 235, 59)`.
line_thickness: The thickness of the bounding box lines. Defaults to 2.
font_scale: The scale of the font used for labels. Defaults to 1.0.
text_thickness: The thickness of the bounding box text. Defaults to
`line_thickness`.
        class_mapping: A dictionary mapping class IDs to class names. Used
            for both ground truth and predicted boxes if `ground_truth_mapping`
            and `prediction_mapping` are not provided. Defaults to `None`.
ground_truth_mapping: A dictionary mapping class IDs to class names
specifically for ground truth boxes. Overrides `class_mapping`
for ground truth. Defaults to `None`.
prediction_mapping: A dictionary mapping class IDs to class names
specifically for predicted boxes. Overrides `class_mapping` for
predictions. Defaults to `None`.
legend: A boolean indicating whether to show a legend.
Defaults to `False`.
legend_handles: A list of matplotlib `Patch` objects to use for the
legend. If this is provided, the `legend` argument will be ignored.
Defaults to `None`.
rows: The number of rows in the image gallery. Required if the images
are not batched. Defaults to `None`.
cols: The number of columns in the image gallery. Required if the images
are not batched. Defaults to `None`.
data_format: The image data format `"channels_last"` or
`"channels_first"`. Defaults to the Keras backend data format.
kwargs: Additional keyword arguments to be passed to
`keras.visualization.plot_image_gallery`.
Returns:
The output of `keras.visualization.plot_image_gallery`.
Raises:
        ValueError: If `images` is not a 4D tensor/array or if both `legend`
            and `legend_handles` are specified.
        ImportError: If matplotlib is not installed.
"""
    if patches is None:
        raise ImportError(
            "The `plot_bounding_box_gallery` function requires the "
            "`matplotlib` package. Please install it with "
            "`pip install matplotlib`."
        )
prediction_mapping = prediction_mapping or class_mapping
ground_truth_mapping = ground_truth_mapping or class_mapping
data_format = data_format or backend.image_data_format()
images_shape = ops.shape(images)
if len(images_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={images_shape}"
)
if data_format == "channels_first": # Ensure correct data format
images = ops.transpose(images, (0, 2, 3, 1))
plotted_images = ops.convert_to_numpy(images)
draw_fn = functools.partial(
draw_bounding_boxes,
bounding_box_format=bounding_box_format,
line_thickness=line_thickness,
text_thickness=text_thickness,
font_scale=font_scale,
)
if y_true is not None:
plotted_images = draw_fn(
plotted_images,
y_true,
color=true_color,
class_mapping=ground_truth_mapping,
)
if y_pred is not None:
plotted_images = draw_fn(
plotted_images,
y_pred,
color=pred_color,
class_mapping=prediction_mapping,
)
if legend:
if legend_handles:
raise ValueError(
"Only pass `legend` OR `legend_handles` to "
"`keras.visualization.plot_bounding_box_gallery()`."
)
legend_handles = [
patches.Patch(
color=np.array(true_color) / 255.0, # Normalize color
label="Ground Truth",
),
patches.Patch(
color=np.array(pred_color) / 255.0, # Normalize color
label="Prediction",
),
]
return plot_image_gallery(
plotted_images,
value_range=value_range,
legend_handles=legend_handles,
rows=rows,
cols=cols,
**kwargs,
)
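# A minimal usage sketch (illustrative; the image and boxes are made-up
# values):
#
#     import numpy as np
#     images = np.zeros((1, 64, 64, 3), dtype="uint8")
#     y_true = {
#         "boxes": np.array([[[4.0, 4.0, 30.0, 30.0]]]),
#         "labels": np.array([[0]]),
#     }
#     plot_bounding_box_gallery(
#         images, "xyxy", y_true=y_true, class_mapping={0: "cat"}, legend=True
#     )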
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/__init__.py | keras/src/visualization/__init__.py | from keras.src.visualization import draw_bounding_boxes
from keras.src.visualization import plot_image_gallery
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/visualization/draw_segmentation_masks.py | keras/src/visualization/draw_segmentation_masks.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.visualization.draw_segmentation_masks")
def draw_segmentation_masks(
images,
segmentation_masks,
num_classes=None,
color_mapping=None,
alpha=0.8,
blend=True,
ignore_index=-1,
data_format=None,
):
"""Draws segmentation masks on images.
The function overlays segmentation masks on the input images.
The masks are blended with the images using the specified alpha value.
Args:
images: A batch of images as a 4D tensor or NumPy array. Shape
should be (batch_size, height, width, channels).
segmentation_masks: A batch of segmentation masks as a 3D or 4D tensor
or NumPy array. Shape should be (batch_size, height, width) or
(batch_size, height, width, 1). The values represent class indices
starting from 1 up to `num_classes`. Class 0 is reserved for
the background and will be ignored if `ignore_index` is not 0.
num_classes: The number of segmentation classes. If `None`, it is
inferred from the maximum value in `segmentation_masks`.
color_mapping: A dictionary mapping class indices to RGB colors.
If `None`, a default color palette is generated. The keys should be
integers starting from 1 up to `num_classes`.
alpha: The opacity of the segmentation masks. Must be in the range
`[0, 1]`.
blend: Whether to blend the masks with the input image using the
`alpha` value. If `False`, the masks are drawn directly on the
images without blending. Defaults to `True`.
ignore_index: The class index to ignore. Mask pixels with this value
will not be drawn. Defaults to -1.
data_format: Image data format, either `"channels_last"` or
`"channels_first"`. Defaults to the `image_data_format` value found
in your Keras config file at `~/.keras/keras.json`. If you never
set it, then it will be `"channels_last"`.
Returns:
A NumPy array of the images with the segmentation masks overlaid.
Raises:
ValueError: If the input `images` is not a 4D tensor or NumPy array.
TypeError: If the input `segmentation_masks` is not an integer type.
"""
data_format = data_format or backend.image_data_format()
images_shape = ops.shape(images)
if len(images_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={images_shape}"
)
if data_format == "channels_first":
images = ops.transpose(images, (0, 2, 3, 1))
segmentation_masks = ops.transpose(segmentation_masks, (0, 2, 3, 1))
images = ops.convert_to_tensor(images, dtype="float32")
segmentation_masks = ops.convert_to_tensor(segmentation_masks)
if not backend.is_int_dtype(segmentation_masks.dtype):
dtype = backend.standardize_dtype(segmentation_masks.dtype)
raise TypeError(
"`segmentation_masks` must be in integer dtype. "
f"Received: segmentation_masks.dtype={dtype}"
)
# Infer num_classes
if num_classes is None:
num_classes = int(ops.convert_to_numpy(ops.max(segmentation_masks)))
if color_mapping is None:
colors = _generate_color_palette(num_classes)
else:
colors = [color_mapping[i] for i in range(num_classes)]
valid_masks = ops.not_equal(segmentation_masks, ignore_index)
valid_masks = ops.squeeze(valid_masks, axis=-1)
segmentation_masks = ops.one_hot(segmentation_masks, num_classes)
segmentation_masks = segmentation_masks[..., 0, :]
segmentation_masks = ops.convert_to_numpy(segmentation_masks)
# Replace class with color
masks = segmentation_masks
masks = np.transpose(masks, axes=(3, 0, 1, 2)).astype("bool")
images_to_draw = ops.convert_to_numpy(images).copy()
for mask, color in zip(masks, colors):
color = np.array(color, dtype=images_to_draw.dtype)
images_to_draw[mask, ...] = color[None, :]
images_to_draw = ops.convert_to_tensor(images_to_draw)
outputs = ops.cast(images_to_draw, dtype="float32")
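    # Alpha-composite the colored masks over the originals, then restore
    # the pixels whose mask value equals `ignore_index`.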
if blend:
outputs = images * (1 - alpha) + outputs * alpha
outputs = ops.where(valid_masks[..., None], outputs, images)
outputs = ops.cast(outputs, dtype="uint8")
outputs = ops.convert_to_numpy(outputs)
return outputs
def _generate_color_palette(num_classes):
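    # Derive a deterministic color per class index: scale the index by three
    # large constants and wrap each RGB channel into [0, 255).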
palette = np.array([2**25 - 1, 2**15 - 1, 2**21 - 1])
return [((i * palette) % 255).tolist() for i in range(num_classes)]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/imdb.py | keras/src/datasets/imdb.py | """IMDB sentiment classification dataset."""
import json
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
from keras.src.utils.python_utils import remove_long_seq
@keras_export("keras.datasets.imdb.load_data")
def load_data(
path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3,
**kwargs,
):
"""Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
    This is a dataset of 25,000 movie reviews from IMDB, labeled by sentiment
(positive/negative). Reviews have been preprocessed, and each review is
encoded as a list of word indexes (integers).
For convenience, words are indexed by overall frequency in the dataset,
so that for instance the integer "3" encodes the 3rd most frequent word in
the data. This allows for quick filtering operations such as:
"only consider the top 10,000 most
common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used
to encode the pad token.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
the `num_words` most frequent words are kept. Any less frequent word
will appear as `oov_char` value in the sequence data. If None,
all words are kept. Defaults to `None`.
skip_top: skip the top N most frequently occurring words
(which may not be informative). These words will appear as
`oov_char` value in the dataset. When 0, no words are
skipped. Defaults to `0`.
maxlen: int or None. Maximum sequence length.
Any longer sequence will be truncated. None, means no truncation.
Defaults to `None`.
seed: int. Seed for reproducible data shuffling.
start_char: int. The start of a sequence will be marked with this
character. 0 is usually the padding character. Defaults to `1`.
oov_char: int. The out-of-vocabulary character.
Words that were cut out because of the `num_words` or
`skip_top` limits will be replaced with this character.
index_from: int. Index actual words with this index and higher.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`, `x_test`**: lists of sequences, which are lists of indexes
    (integers). If the `num_words` argument was specified, the maximum
    possible index value is `num_words - 1`. If the `maxlen` argument was
specified, the largest possible sequence length is `maxlen`.
**`y_train`, `y_test`**: lists of integer labels (1 or 0).
    **Note**: The 'out of vocabulary' character is only used for
    words that were present in the training set but were dropped because
    they did not make the `num_words` cut. Words that were not seen in
    the training set but are in the test set have simply been skipped.
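    Example:

    ```python
    # A minimal usage sketch of the documented API:
    (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(
        num_words=10000
    )
    # Each review is a list of word indices; labels are 0 (negative) or
    # 1 (positive).
    assert max(max(seq) for seq in x_train) < 10000
    assert set(y_train) == {0, 1}
    ```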
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=f"{origin_folder}imdb.npz",
file_hash=( # noqa: E501
"69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, labels_train = f["x_train"], f["y_train"]
x_test, labels_test = f["x_test"], f["y_test"]
rng = np.random.RandomState(seed)
indices = np.arange(len(x_train))
rng.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
rng.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
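    # Shift raw word indices up by `index_from` so that low indices stay
    # reserved (0 = padding, 1 = start, 2 = OOV), optionally prepending
    # `start_char`.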
if start_char is not None:
x_train = [[start_char] + [w + index_from for w in x] for x in x_train]
x_test = [[start_char] + [w + index_from for w in x] for x in x_test]
elif index_from:
x_train = [[w + index_from for w in x] for x in x_train]
x_test = [[w + index_from for w in x] for x in x_test]
else:
x_train = [[w for w in x] for x in x_train]
x_test = [[w for w in x] for x in x_test]
if maxlen:
x_train, labels_train = remove_long_seq(maxlen, x_train, labels_train)
x_test, labels_test = remove_long_seq(maxlen, x_test, labels_test)
if not x_train or not x_test:
raise ValueError(
"After filtering for sequences shorter than maxlen="
f"{str(maxlen)}, no sequence was kept. Increase maxlen."
)
xs = x_train + x_test
labels = np.concatenate([labels_train, labels_test])
if not num_words:
num_words = max(max(x) for x in xs)
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [
[w if (skip_top <= w < num_words) else oov_char for w in x]
for x in xs
]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx], dtype="object"), labels[:idx]
x_test, y_test = np.array(xs[idx:], dtype="object"), labels[idx:]
return (x_train, y_train), (x_test, y_test)
@keras_export("keras.datasets.imdb.get_word_index")
def get_word_index(path="imdb_word_index.json"):
"""Retrieves a dict mapping words to their index in the IMDB dataset.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary. Keys are word strings, values are their
index.
Example:
```python
# Use the default parameters to keras.datasets.imdb.load_data
start_char = 1
oov_char = 2
index_from = 3
# Retrieve the training sequences.
(x_train, _), _ = keras.datasets.imdb.load_data(
start_char=start_char, oov_char=oov_char, index_from=index_from
)
# Retrieve the word index file mapping words to indices
word_index = keras.datasets.imdb.get_word_index()
# Reverse the word index to obtain a dict mapping indices to words
# And add `index_from` to indices to sync with `x_train`
inverted_word_index = dict(
(i + index_from, word) for (word, i) in word_index.items()
)
# Update `inverted_word_index` to include `start_char` and `oov_char`
inverted_word_index[start_char] = "[START]"
inverted_word_index[oov_char] = "[OOV]"
# Decode the first sequence in the dataset
decoded_sequence = " ".join(inverted_word_index[i] for i in x_train[0])
```
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=f"{origin_folder}imdb_word_index.json",
file_hash="bfafd718b763782e994055a2d397834f",
)
with open(path) as f:
return json.load(f)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/boston_housing.py | keras/src/datasets/boston_housing.py | import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.boston_housing.load_data")
def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
"""Loads the Boston Housing dataset.
This is a dataset taken from the StatLib library which is maintained at
Carnegie Mellon University.
**WARNING:** This dataset has an ethical problem: the authors of this
dataset included a variable, "B", that may appear to assume that racial
self-segregation influences house prices. As such, we strongly discourage
the use of this dataset, unless in the context of illustrating ethical
issues in data science and machine learning.
Samples contain 13 attributes of houses at different locations around the
Boston suburbs in the late 1970s. Targets are the median values of
the houses at a location (in k$).
The attributes themselves are defined in the
[StatLib website](http://lib.stat.cmu.edu/datasets/boston).
Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
test_split: fraction of the data to reserve as test set.
seed: Random seed for shuffling the data
before computing the test split.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **x_train, x_test**: NumPy arrays with shape `(num_samples, 13)`
        containing the training samples (for `x_train`) or the test
        samples (for `x_test`).
**y_train, y_test**: NumPy arrays of shape `(num_samples,)` containing the
target scalars. The targets are float scalars typically between 10 and
50 that represent the home prices in k$.
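    Example:

    ```python
    # A minimal usage sketch; the full dataset has 506 samples, so the
    # default test_split=0.2 yields a 404/102 split.
    (x_train, y_train), (x_test, y_test) = (
        keras.datasets.boston_housing.load_data()
    )
    assert x_train.shape == (404, 13)
    assert x_test.shape == (102, 13)
    ```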
"""
assert 0 <= test_split < 1
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
path,
origin=f"{origin_folder}boston_housing.npz",
file_hash=( # noqa: E501
"f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5"
),
)
with np.load(path, allow_pickle=True) as f:
x = f["x"]
y = f["y"]
rng = np.random.RandomState(seed)
indices = np.arange(len(x))
rng.shuffle(indices)
x = x[indices]
y = y[indices]
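    # Slice the first (1 - test_split) fraction of the shuffled data as the
    # training set and keep the remainder as the test set.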
x_train = np.array(x[: int(len(x) * (1 - test_split))])
y_train = np.array(y[: int(len(x) * (1 - test_split))])
x_test = np.array(x[int(len(x) * (1 - test_split)) :])
y_test = np.array(y[int(len(x) * (1 - test_split)) :])
return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/fashion_mnist.py | keras/src/datasets/fashion_mnist.py | """Fashion-MNIST dataset."""
import gzip
import os
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.fashion_mnist.load_data")
def load_data():
"""Loads the Fashion-MNIST dataset.
This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories,
along with a test set of 10,000 images. This dataset can be used as
a drop-in replacement for MNIST.
The classes are:
| Label | Description |
|:-----:|-------------|
| 0 | T-shirt/top |
| 1 | Trouser |
| 2 | Pullover |
| 3 | Dress |
| 4 | Coat |
| 5 | Sandal |
| 6 | Shirt |
| 7 | Sneaker |
| 8 | Bag |
| 9 | Ankle boot |
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of grayscale image data with shape
`(60000, 28, 28)`, containing the training data.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(60000,)` for the training data.
    **`x_test`**: `uint8` NumPy array of grayscale image data with shape
        `(10000, 28, 28)`, containing the test data.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(10000,)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
assert x_train.shape == (60000, 28, 28)
assert x_test.shape == (10000, 28, 28)
assert y_train.shape == (60000,)
assert y_test.shape == (10000,)
```
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
"""
dirname = os.path.join("datasets", "fashion-mnist")
base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
files = [
"train-labels-idx1-ubyte.gz",
"train-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
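    # IDX format: label files carry an 8-byte header (magic number + item
    # count) and image files a 16-byte header (magic, count, rows, cols),
    # hence the read offsets below.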
with gzip.open(paths[0], "rb") as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], "rb") as imgpath:
x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(
len(y_train), 28, 28
)
with gzip.open(paths[2], "rb") as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], "rb") as imgpath:
x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(
len(y_test), 28, 28
)
return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/mnist.py | keras/src/datasets/mnist.py | """MNIST handwritten digits dataset."""
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.mnist.load_data")
def load_data(path="mnist.npz"):
"""Loads the MNIST dataset.
This is a dataset of 60,000 28x28 grayscale images of the 10 digits,
along with a test set of 10,000 images.
More info can be found at the
[MNIST homepage](http://yann.lecun.com/exdb/mnist/).
Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of grayscale image data with shape
`(60000, 28, 28)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of digit labels (integers in range 0-9)
with shape `(60000,)` for the training data.
    **`x_test`**: `uint8` NumPy array of grayscale image data with shape
`(10000, 28, 28)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of digit labels (integers in range 0-9)
with shape `(10000,)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
assert x_train.shape == (60000, 28, 28)
assert x_test.shape == (10000, 28, 28)
assert y_train.shape == (60000,)
assert y_test.shape == (10000,)
```
License:
Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,
which is a derivative work from original NIST datasets.
MNIST dataset is made available under the terms of the
[Creative Commons Attribution-Share Alike 3.0 license.](
https://creativecommons.org/licenses/by-sa/3.0/)
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=f"{origin_folder}mnist.npz",
file_hash=( # noqa: E501
"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f["x_train"], f["y_train"]
x_test, y_test = f["x_test"], f["y_test"]
return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/cifar100.py | keras/src/datasets/cifar100.py | """CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_data(label_mode="fine"):
"""Loads the CIFAR100 dataset.
This is a dataset of 50,000 32x32 color training images and
10,000 test images, labeled over 100 fine-grained classes that are
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
Args:
label_mode: one of `"fine"`, `"coarse"`.
If it is `"fine"`, the category labels
are the fine-grained labels, and if it is `"coarse"`,
the output labels are the coarse-grained superclasses.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shape
        `(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shape
        `(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python-target"
origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
extract=True,
file_hash=( # noqa: E501
"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
),
)
path = os.path.join(path, "cifar-100-python")
fpath = os.path.join(path, "train")
x_train, y_train = load_batch(fpath, label_key=f"{label_mode}_labels")
fpath = os.path.join(path, "test")
x_test, y_test = load_batch(fpath, label_key=f"{label_mode}_labels")
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
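    # CIFAR batches are stored channels-first (N, C, H, W); transpose to
    # channels-last when the backend expects it.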
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |