repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/california_housing.py | keras/src/datasets/california_housing.py | """California housing price regression dataset."""
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.california_housing.load_data")
def load_data(
    version="large", path="california_housing.npz", test_split=0.2, seed=113
):
    """Loads the California Housing dataset.

    This dataset was obtained from the [StatLib repository](
    https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html).

    It's a continuous regression dataset with 20,640 samples with
    8 features each.

    The target variable is a scalar: the median house value
    for California districts, in dollars.

    The 8 input features are the following:

    - MedInc: median income in block group
    - HouseAge: median house age in block group
    - AveRooms: average number of rooms per household
    - AveBedrms: average number of bedrooms per household
    - Population: block group population
    - AveOccup: average number of household members
    - Latitude: block group latitude
    - Longitude: block group longitude

    This dataset was derived from the 1990 U.S. census, using one row
    per census block group. A block group is the smallest geographical
    unit for which the U.S. Census Bureau publishes sample data
    (a block group typically has a population of 600 to 3,000 people).

    A household is a group of people residing within a home. Since the
    average number of rooms and bedrooms in this dataset are provided
    per household, these columns may take surprisingly large values for
    block groups with few households and many empty houses, such as
    vacation resorts.

    Args:
        version: `"small"` or `"large"`. The small version
            contains 600 samples, the large version contains
            20,640 samples. The purpose of the small version is
            to serve as an approximate replacement for the
            deprecated `boston_housing` dataset.
        path: path where to cache the dataset locally
            (relative to `~/.keras/datasets`).
        test_split: fraction of the data to reserve as test set.
        seed: Random seed for shuffling the data
            before computing the test split.

    Returns:
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

    **`x_train`, `x_test`**: numpy arrays with shape `(num_samples, 8)`
        containing either the training samples (for `x_train`),
        or test samples (for `x_test`).

    **`y_train`, `y_test`**: numpy arrays of shape `(num_samples,)`
        containing the target scalars. The targets are float scalars
        typically between 25,000 and 500,000 that represent
        the home prices in dollars.
    """
    assert 0 <= test_split < 1
    # Validate `version` up front so a bad argument fails fast, before any
    # download happens (previously this was only checked after fetching
    # and loading the file).
    if version not in ("small", "large"):
        raise ValueError(
            "Argument `version` must be one of 'small', 'large'. "
            f"Received: version={version}"
        )
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    path = get_file(
        path,
        origin=f"{origin_folder}california_housing.npz",
        file_hash=(  # noqa: E501
            "1a2e3a52e0398de6463aebe6f4a8da34fb21fbb6b934cf88c3425e766f2a1a6f"
        ),
    )
    with np.load(path, allow_pickle=True) as f:
        x = f["x"]
        y = f["y"]
    if version == "small":
        x = x[:600]
        y = y[:600]
    # Deterministic shuffle before splitting, controlled by `seed`.
    rng = np.random.RandomState(seed)
    indices = np.arange(len(x))
    rng.shuffle(indices)
    x = x[indices]
    y = y[indices]
    # Compute the split point once instead of four times.
    split = int(len(x) * (1 - test_split))
    x_train = np.array(x[:split])
    y_train = np.array(y[:split])
    x_test = np.array(x[split:])
    y_test = np.array(y[split:])
    return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/__init__.py | keras/src/datasets/__init__.py | """Small NumPy datasets for debugging/testing."""
from keras.src.datasets import boston_housing
from keras.src.datasets import california_housing
from keras.src.datasets import cifar10
from keras.src.datasets import cifar100
from keras.src.datasets import fashion_mnist
from keras.src.datasets import imdb
from keras.src.datasets import mnist
from keras.src.datasets import reuters
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/cifar10.py | keras/src/datasets/cifar10.py | """CIFAR10 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar10.load_data")
def load_data():
    """Loads the CIFAR10 dataset.

    This is a dataset of 50,000 32x32 color training images and 10,000 test
    images, labeled over 10 categories. See more info at the
    [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).

    The classes are:

    | Label | Description |
    |:-----:|-------------|
    |   0   | airplane    |
    |   1   | automobile  |
    |   2   | bird        |
    |   3   | cat         |
    |   4   | deer        |
    |   5   | dog         |
    |   6   | frog        |
    |   7   | horse       |
    |   8   | ship        |
    |   9   | truck       |

    Returns:
        Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.

    **`x_train`**: `uint8` NumPy array of RGB image data with shape
    `(50000, 32, 32, 3)`, containing the training data. Pixel values range
    from 0 to 255.

    **`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)
    with shape `(50000, 1)` for the training data.

    **`x_test`**: `uint8` NumPy array of RGB image data with shape
    `(10000, 32, 32, 3)`, containing the test data. Pixel values range
    from 0 to 255.

    **`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)
    with shape `(10000, 1)` for the test data.

    Example:

    ```python
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
    assert x_train.shape == (50000, 32, 32, 3)
    assert x_test.shape == (10000, 32, 32, 3)
    assert y_train.shape == (50000, 1)
    assert y_test.shape == (10000, 1)
    ```

    **Note**: The CIFAR-10 dataset is known to have a small percentage of
    mislabeled samples, which is inherent to the original dataset. This label
    noise may impact training and evaluation. For more details, refer to
    discussions in the research literature on CIFAR-10 label quality.
    """
    dirname = "cifar-10-batches-py-target"
    origin = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    path = get_file(
        fname=dirname,
        origin=origin,
        extract=True,
        file_hash=(  # noqa: E501
            "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
        ),
    )
    num_train_samples = 50000
    # Batch files store images channels-first: (N, 3, 32, 32).
    x_train = np.empty((num_train_samples, 3, 32, 32), dtype="uint8")
    y_train = np.empty((num_train_samples,), dtype="uint8")
    # batches are within an inner folder
    path = os.path.join(path, "cifar-10-batches-py")
    # Five training batches of 10,000 samples each.
    for i in range(1, 6):
        fpath = os.path.join(path, f"data_batch_{i}")
        (
            x_train[(i - 1) * 10000 : i * 10000, :, :, :],
            y_train[(i - 1) * 10000 : i * 10000],
        ) = load_batch(fpath)
    fpath = os.path.join(path, "test_batch")
    x_test, y_test = load_batch(fpath)
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))
    if backend.image_data_format() == "channels_last":
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)
    x_test = x_test.astype(x_train.dtype)
    y_test = y_test.astype(y_train.dtype)
    return (x_train, y_train), (x_test, y_test)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/reuters.py | keras/src/datasets/reuters.py | """Reuters topic classification dataset."""
import json
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
from keras.src.utils.python_utils import remove_long_seq
@keras_export("keras.datasets.reuters.load_data")
def load_data(
    path="reuters.npz",
    num_words=None,
    skip_top=0,
    maxlen=None,
    test_split=0.2,
    seed=113,
    start_char=1,
    oov_char=2,
    index_from=3,
):
    """Loads the Reuters newswire classification dataset.

    This is a dataset of 11,228 newswires from Reuters, labeled over 46
    topics.

    This was originally generated by parsing and preprocessing the classic
    Reuters-21578 dataset, but the preprocessing code is no longer packaged
    with Keras. See this
    [GitHub discussion](https://github.com/keras-team/keras/issues/12072)
    for more info.

    Each newswire is encoded as a list of word indexes (integers).
    For convenience, words are indexed by overall frequency in the dataset,
    so that for instance the integer "3" encodes the 3rd most frequent word in
    the data. This allows for quick filtering operations such as:
    "only consider the top 10,000 most
    common words, but eliminate the top 20 most common words".

    As a convention, "0" does not stand for a specific word, but instead is
    used to encode any unknown word.

    Args:
        path: where to cache the data (relative to `~/.keras/dataset`).
        num_words: integer or None. Words are
            ranked by how often they occur (in the training set) and only
            the `num_words` most frequent words are kept. Any less frequent
            word will appear as `oov_char` value in the sequence data. If
            None, all words are kept. Defaults to `None`.
        skip_top: skip the top N most frequently occurring words
            (which may not be informative). These words will appear as
            `oov_char` value in the dataset. 0 means no words are
            skipped. Defaults to `0`.
        maxlen: int or None. Maximum sequence length.
            Any longer sequence will be truncated. None means no truncation.
            Defaults to `None`.
        test_split: Float between `0.` and `1.`. Fraction of the dataset to
            be used as test data. `0.2` means that 20% of the dataset is
            used as test data. Defaults to `0.2`.
        seed: int. Seed for reproducible data shuffling.
        start_char: int. The start of a sequence will be marked with this
            character. 0 is usually the padding character. Defaults to `1`.
        oov_char: int. The out-of-vocabulary character.
            Words that were cut out because of the `num_words` or
            `skip_top` limits will be replaced with this character.
        index_from: int. Index actual words with this index and higher.

    Returns:
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

    **`x_train`, `x_test`**: lists of sequences, which are lists of indexes
        (integers). If the num_words argument was specified, the maximum
        possible index value is `num_words - 1`. If the `maxlen` argument
        was specified, the largest possible sequence length is `maxlen`.

    **`y_train`, `y_test`**: arrays of integer topic labels
        (values between 0 and 45).

    **Note**: The 'out of vocabulary' character is only used for
    words that were present in the training set but are not included
    because they're not making the `num_words` cut here.
    Words that were not seen in the training set but are in the test set
    have simply been skipped.
    """
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    path = get_file(
        fname=path,
        origin=f"{origin_folder}reuters.npz",
        file_hash=(  # noqa: E501
            "d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916"
        ),
    )
    with np.load(path, allow_pickle=True) as f:
        xs, labels = f["x"], f["y"]
    # Deterministic shuffle controlled by `seed`.
    rng = np.random.RandomState(seed)
    indices = np.arange(len(xs))
    rng.shuffle(indices)
    xs = xs[indices]
    labels = labels[indices]
    if start_char is not None:
        xs = [[start_char] + [w + index_from for w in x] for x in xs]
    elif index_from:
        xs = [[w + index_from for w in x] for x in xs]
    if maxlen:
        xs, labels = remove_long_seq(maxlen, xs, labels)
    if not num_words:
        num_words = max(max(x) for x in xs)
    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters:
    # 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        xs = [
            [w if skip_top <= w < num_words else oov_char for w in x]
            for x in xs
        ]
    else:
        xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
    idx = int(len(xs) * (1 - test_split))
    x_train, y_train = (
        np.array(xs[:idx], dtype="object"),
        np.array(labels[:idx]),
    )
    x_test, y_test = np.array(xs[idx:], dtype="object"), np.array(labels[idx:])
    return (x_train, y_train), (x_test, y_test)
@keras_export("keras.datasets.reuters.get_word_index")
def get_word_index(path="reuters_word_index.json"):
    """Retrieves a dict mapping words to their index in the Reuters dataset.

    Actual word indices start from 3, with 3 indices reserved for:
    0 (padding), 1 (start), 2 (oov).

    E.g. if the word index of 'the' is 1, then in the actual training data
    its index will be 1 + 3 = 4. Vice versa, to translate word indices from
    the training data back to words with this mapping, first subtract 3
    from the indices.

    Args:
        path: where to cache the data (relative to `~/.keras/dataset`).

    Returns:
        The word index dictionary. Keys are word strings, values are their
        index.
    """
    base_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    local_path = get_file(
        path,
        origin=base_url + "reuters_word_index.json",
        file_hash="4d44cc38712099c9e383dc6e5f11a921",
    )
    with open(local_path) as handle:
        return json.load(handle)
@keras_export("keras.datasets.reuters.get_label_names")
def get_label_names():
    """Returns labels as a list of strings with indices matching training data.

    Reference:

    - [Reuters Dataset](https://martin-thoma.com/nlp-reuters/)
    """
    # Order matters: position i is the name of integer label i.
    return (
        "cocoa", "grain", "veg-oil", "earn", "acq", "wheat", "copper",
        "housing", "money-supply", "coffee", "sugar", "trade", "reserves",
        "ship", "cotton", "carcass", "crude", "nat-gas", "cpi", "money-fx",
        "interest", "gnp", "meal-feed", "alum", "oilseed", "gold", "tin",
        "strategic-metal", "livestock", "retail", "ipi", "iron-steel",
        "rubber", "heat", "jobs", "lei", "bop", "zinc", "orange",
        "pet-chem", "dlr", "gas", "silver", "wpi", "hog", "lead",
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/datasets/cifar.py | keras/src/datasets/cifar.py | """Utilities common to CIFAR10 and CIFAR100 datasets."""
import _pickle as cPickle
def load_batch(fpath, label_key="labels"):
    """Parse a single pickled CIFAR batch file.

    Args:
        fpath: path of the file to parse.
        label_key: key for label data in the retrieved
            dictionary.

    Returns:
        A tuple `(data, labels)` where `data` has shape
        `(num_samples, 3, 32, 32)`.
    """
    with open(fpath, "rb") as f:
        raw = cPickle.load(f, encoding="bytes")
    # Keys come back as bytes under encoding="bytes"; re-key with utf8 strings.
    batch = {key.decode("utf8"): value for key, value in raw.items()}
    images = batch["data"]
    labels = batch[label_key]
    # Reshape the flat (N, 3072) pixel rows into channels-first images.
    images = images.reshape(images.shape[0], 3, 32, 32)
    return images, labels
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/probabilistic_metrics_test.py | keras/src/metrics/probabilistic_metrics_test.py | import numpy as np
from keras.src import metrics
from keras.src import testing
class KLDivergenceTest(testing.TestCase):
    """Unit tests for the `metrics.KLDivergence` metric."""

    def setup(self):
        # Fixture: two rows of three probabilities each, plus the
        # element-wise KL contributions y_true * log(y_true / y_pred).
        self.y_pred = np.asarray(
            [0.4, 0.9, 0.12, 0.36, 0.3, 0.4], dtype=np.float32
        ).reshape((2, 3))
        self.y_true = np.asarray(
            [0.5, 0.8, 0.12, 0.7, 0.43, 0.8], dtype=np.float32
        ).reshape((2, 3))
        self.batch_size = 2
        self.expected_results = self.y_true * np.log(self.y_true / self.y_pred)

    def test_config(self):
        metric = metrics.KLDivergence(name="kld", dtype="int32")
        self.assertEqual(metric.name, "kld")
        self.assertEqual(metric._dtype, "int32")
        # Round-trip through get_config / from_config.
        clone = metrics.KLDivergence.from_config(metric.get_config())
        self.assertEqual(clone.name, "kld")
        self.assertEqual(clone._dtype, "int32")

    def test_unweighted(self):
        self.setup()
        metric = metrics.KLDivergence()
        metric.update_state(self.y_true, self.y_pred)
        expected = np.sum(self.expected_results) / self.batch_size
        self.assertAllClose(metric.result(), expected, atol=1e-3)

    def test_weighted(self):
        self.setup()
        metric = metrics.KLDivergence()
        batch_weights = np.asarray([1.2, 3.4], dtype=np.float32).reshape(
            (2, 1)
        )
        result = metric(self.y_true, self.y_pred, sample_weight=batch_weights)
        # Broadcast the per-row weights across the three columns.
        elementwise_weights = np.asarray(
            [1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
        ).reshape((2, 3))
        expected = np.sum(self.expected_results * elementwise_weights) / (
            1.2 + 3.4
        )
        self.assertAllClose(result, expected, atol=1e-3)
class PoissonTest(testing.TestCase):
    """Unit tests for the `metrics.Poisson` metric."""

    def setup(self):
        # Fixture: element-wise Poisson loss y_pred - y_true * log(y_pred).
        self.y_pred = np.asarray(
            [1, 9, 2, 5, 2, 6], dtype=np.float32
        ).reshape((2, 3))
        self.y_true = np.asarray(
            [4, 8, 12, 8, 1, 3], dtype=np.float32
        ).reshape((2, 3))
        self.batch_size = 6
        self.expected_results = self.y_pred - self.y_true * np.log(self.y_pred)

    def test_config(self):
        self.run_class_serialization_test(metrics.Poisson(name="poisson"))

    def test_unweighted(self):
        self.setup()
        metric = metrics.Poisson()
        metric.update_state(self.y_true, self.y_pred)
        expected = np.sum(self.expected_results) / self.batch_size
        self.assertAllClose(metric.result(), expected, atol=1e-3)

    def test_weighted(self):
        self.setup()
        metric = metrics.Poisson()
        batch_weights = np.asarray([1.2, 3.4], dtype=np.float32).reshape(
            (2, 1)
        )
        result = metric(self.y_true, self.y_pred, sample_weight=batch_weights)
        # Broadcast the per-row weights across the three columns.
        elementwise_weights = np.asarray(
            [1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
        ).reshape((2, 3))
        expected = np.sum(
            self.expected_results * elementwise_weights
        ) / np.sum(elementwise_weights)
        self.assertAllClose(result, expected, atol=1e-3)
class BinaryCrossentropyTest(testing.TestCase):
    """Unit tests for the `metrics.BinaryCrossentropy` metric."""

    def test_config(self):
        self.run_class_serialization_test(
            metrics.BinaryCrossentropy(
                name="bce", dtype="int32", label_smoothing=0.2
            )
        )

    def test_unweighted(self):
        metric = metrics.BinaryCrossentropy()
        targets = np.array([[1, 0], [1, 0]])
        predictions = np.array([[1, 1], [1, 0]], dtype=np.float32)
        self.assertAllClose(metric(targets, predictions), 3.9855, atol=1e-3)

    def test_unweighted_with_logits(self):
        metric = metrics.BinaryCrossentropy(from_logits=True)
        targets = np.array([[1, 0, 1], [0, 1, 1]])
        logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
        self.assertAllClose(metric(targets, logits), 3.333, atol=1e-3)

    def test_weighted(self):
        metric = metrics.BinaryCrossentropy()
        targets = np.array([[1, 0], [1, 0]])
        predictions = np.array([[1, 1], [1, 0]], dtype=np.float32)
        weights = np.array([1.5, 2.0])
        result = metric(targets, predictions, sample_weight=weights)
        self.assertAllClose(result, 3.4162, atol=1e-3)

    def test_weighted_from_logits(self):
        metric = metrics.BinaryCrossentropy(from_logits=True)
        targets = np.array([[1, 0, 1], [0, 1, 1]])
        logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
        weights = np.array([2.0, 2.5])
        result = metric(targets, logits, sample_weight=weights)
        self.assertAllClose(result, 3.7037, atol=1e-3)

    def test_label_smoothing(self):
        logits = np.array([10.0, -10.0, -10.0])
        targets = np.array([1, 0, 1])
        label_smoothing = 0.1
        metric = metrics.BinaryCrossentropy(
            from_logits=True, label_smoothing=label_smoothing
        )
        result = metric(targets, logits)
        # With smoothing s, the expected BCE here is (10 + 5 * s) / 3.
        expected = (10.0 + 5.0 * label_smoothing) / 3.0
        self.assertAllClose(expected, result, atol=1e-3)
class CategoricalCrossentropyTest(testing.TestCase):
    """Unit tests for the `metrics.CategoricalCrossentropy` metric."""

    def test_config(self):
        self.run_class_serialization_test(
            metrics.CategoricalCrossentropy(
                name="cce", dtype="int32", label_smoothing=0.2
            )
        )

    def test_unweighted(self):
        metric = metrics.CategoricalCrossentropy()
        targets = np.array([[0, 1, 0], [0, 0, 1]])
        probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
        self.assertAllClose(metric(targets, probs), 1.176, atol=1e-3)

    def test_unweighted_from_logits(self):
        metric = metrics.CategoricalCrossentropy(from_logits=True)
        targets = np.array([[0, 1, 0], [0, 0, 1]])
        logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
        self.assertAllClose(metric(targets, logits), 3.5011, atol=1e-3)

    def test_weighted(self):
        metric = metrics.CategoricalCrossentropy()
        targets = np.array([[0, 1, 0], [0, 0, 1]])
        probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
        weights = np.array([1.5, 2.0])
        result = metric(targets, probs, sample_weight=weights)
        self.assertAllClose(result, 1.338, atol=1e-3)

    def test_weighted_from_logits(self):
        metric = metrics.CategoricalCrossentropy(from_logits=True)
        targets = np.array([[0, 1, 0], [0, 0, 1]])
        logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
        weights = np.array([1.5, 2.0])
        result = metric(targets, logits, sample_weight=weights)
        self.assertAllClose(result, 4.0012, atol=1e-3)

    def test_label_smoothing(self):
        targets = np.array([[0, 1, 0], [0, 0, 1]])
        logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
        label_smoothing = 0.1
        metric = metrics.CategoricalCrossentropy(
            from_logits=True, label_smoothing=label_smoothing
        )
        self.assertAllClose(metric(targets, logits), 3.667, atol=1e-3)
class SparseCategoricalCrossentropyTest(testing.TestCase):
    """Unit tests for the `metrics.SparseCategoricalCrossentropy` metric."""

    def test_config(self):
        self.run_class_serialization_test(
            metrics.SparseCategoricalCrossentropy(name="scce", dtype="int32")
        )

    def test_unweighted(self):
        metric = metrics.SparseCategoricalCrossentropy()
        labels = np.array([1, 2])
        probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
        self.assertAllClose(metric(labels, probs), 1.176, atol=1e-3)

    def test_unweighted_from_logits(self):
        metric = metrics.SparseCategoricalCrossentropy(from_logits=True)
        labels = np.array([1, 2])
        logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
        self.assertAllClose(metric(labels, logits), 3.5011, atol=1e-3)

    def test_weighted(self):
        metric = metrics.SparseCategoricalCrossentropy()
        labels = np.array([1, 2])
        probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
        weights = np.array([1.5, 2.0])
        result = metric(labels, probs, sample_weight=weights)
        self.assertAllClose(result, 1.338, atol=1e-3)

    def test_weighted_from_logits(self):
        metric = metrics.SparseCategoricalCrossentropy(from_logits=True)
        labels = np.array([1, 2])
        logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
        weights = np.array([1.5, 2.0])
        result = metric(labels, logits, sample_weight=weights)
        self.assertAllClose(result, 4.0012, atol=1e-3)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/accuracy_metrics.py | keras/src/metrics/accuracy_metrics.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.metrics import reduction_metrics
def accuracy(y_true, y_pred):
    # Element-wise equality between targets and predictions, as 0.0/1.0
    # values in the backend's default float dtype.
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    matches = ops.equal(y_true, y_pred)
    return ops.cast(matches, dtype=backend.floatx())
@keras_export("keras.metrics.Accuracy")
class Accuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    The metric keeps two internal variables, `total` and `count`, that
    track how often `y_pred` exactly matches `y_true`. The reported value
    is simply `total / count` — an idempotent division.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Accuracy()])
    ```
    """

    def __init__(self, name="accuracy", dtype=None):
        super().__init__(fn=accuracy, name=name, dtype=dtype)
        # Larger values are better, so optimization should maximize this.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.binary_accuracy")
def binary_accuracy(y_true, y_pred, threshold=0.5):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true)
    threshold = ops.convert_to_tensor(threshold)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    # Binarize predictions at the threshold before comparing to targets.
    predictions = ops.cast(ops.greater(y_pred, threshold), y_true.dtype)
    return ops.cast(ops.equal(y_true, predictions), dtype=backend.floatx())
@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    The metric keeps two internal variables, `total` and `count`, that
    track how often `y_pred` matches `y_true` after thresholding. The
    reported value is simply `total / count` — an idempotent division.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        threshold: (Optional) Float representing the threshold for deciding
            whether prediction values are 1 or 0.

    Example:

    >>> m = keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.BinaryAccuracy()])
    ```
    """

    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            fn=binary_accuracy, name=name, dtype=dtype, threshold=threshold
        )
        self.threshold = threshold
        # Larger values are better, so optimization should maximize this.
        self._direction = "up"

    def get_config(self):
        return {
            "name": self.name,
            "dtype": self.dtype,
            "threshold": self.threshold,
        }
@keras_export("keras.metrics.categorical_accuracy")
def categorical_accuracy(y_true, y_pred):
    """Equality between the argmax of one-hot `y_true` and of `y_pred`.

    Args:
        y_true: One-hot ground truth values.
        y_pred: The prediction values (probabilities or logits).

    Returns:
        Float tensor of 0.0/1.0 match indicators, one per sample.
    """
    # Reduce one-hot targets to integer class indices.
    y_true = ops.argmax(y_true, axis=-1)
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true_org_shape = ops.shape(y_true)
    y_pred_rank = len(y_pred.shape)
    y_true_rank = len(y_true.shape)
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if (
        (y_true_rank is not None)
        and (y_pred_rank is not None)
        and (len(y_true.shape) == len(y_pred.shape))
    ):
        y_true = ops.squeeze(y_true, -1)
        reshape_matches = True
    y_pred = ops.argmax(y_pred, axis=-1)
    # If the predicted output and actual output dtypes don't match, force
    # cast them to match. Compare dtypes by value (`!=`) rather than by
    # identity (`is not`): object identity is not a reliable proxy for
    # dtype equality, and an extra no-op cast would be the only consequence
    # either way.
    if y_pred.dtype != y_true.dtype:
        y_pred = ops.cast(y_pred, dtype=y_true.dtype)
    matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)
    return matches
@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are same.

    The metric keeps two internal variables, `total` and `count`, that
    track how often `y_pred` matches `y_true`. The reported value is
    simply `total / count` — an idempotent division.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `ops.one_hot` to expand
    `y_true` as a vector.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.CategoricalAccuracy()])
    ```
    """

    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(fn=categorical_accuracy, name=name, dtype=dtype)
        # Larger values are better, so optimization should maximize this.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.sparse_categorical_accuracy")
def sparse_categorical_accuracy(y_true, y_pred):
    """Equality between integer labels `y_true` and the argmax of `y_pred`.

    Args:
        y_true: Integer ground truth values.
        y_pred: The prediction values (probabilities or logits).

    Returns:
        Float tensor of 0.0/1.0 match indicators, one per sample.
    """
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true_org_shape = ops.shape(y_true)
    y_pred_rank = len(y_pred.shape)
    y_true_rank = len(y_true.shape)
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if (
        (y_true_rank is not None)
        and (y_pred_rank is not None)
        and (len(y_true.shape) == len(y_pred.shape))
        and ops.shape(y_true)[-1] == 1
    ):
        y_true = ops.squeeze(y_true, -1)
        reshape_matches = True
    y_pred = ops.argmax(y_pred, axis=-1)
    # If the predicted output and actual output dtypes don't match, force
    # cast them to match. Compare dtypes by value (`!=`) rather than by
    # identity (`is not`): object identity is not a reliable proxy for
    # dtype equality, and an extra no-op cast would be the only consequence
    # either way.
    if y_pred.dtype != y_true.dtype:
        y_pred = ops.cast(y_pred, y_true.dtype)
    matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)
    # if shape is (num_samples, 1) squeeze
    if len(matches.shape) > 1 and matches.shape[-1] == 1:
        matches = ops.squeeze(matches, -1)
    return matches
@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
    ```

    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are same.

    The metric keeps two internal variables, `total` and `count`, that
    track how often `y_pred` matches `y_true`. The reported value is
    simply `total / count` — an idempotent division.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=[keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype)
        # Larger values are better, so optimization should maximize this.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.top_k_categorical_accuracy")
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Whether the one-hot target class is within the top `k` predictions.

    Args:
        y_true: One-hot ground truth values.
        y_pred: The prediction values (probabilities or logits).
        k: Number of top elements to look at. Defaults to `5`.

    Returns:
        Float tensor of 0.0/1.0 hit indicators, reshaped to match the
        argmax-reduced shape of `y_true`.
    """
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    # Reduce one-hot targets to integer class indices.
    y_true = ops.argmax(y_true, axis=-1)
    y_true_rank = len(y_true.shape)
    y_pred_rank = len(y_pred.shape)
    y_true_org_shape = ops.shape(y_true)
    # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None):
        if y_pred_rank > 2:
            y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
        if y_true_rank > 1:
            reshape_matches = True
            y_true = ops.reshape(y_true, [-1])
    # in_top_k requires integer targets; backend floatx for the 0/1 output.
    matches = ops.cast(
        ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
        dtype=backend.floatx(),
    )
    # returned matches is expected to have same shape as y_true input
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)
    return matches
@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='categorical_crossentropy',
metrics=[keras.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
super().__init__(
fn=top_k_categorical_accuracy,
name=name,
dtype=dtype,
k=k,
)
self.k = k
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
return {"name": self.name, "dtype": self.dtype, "k": self.k}
@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
def sparse_top_k_categorical_accuracy(
y_true, y_pred, k=5, from_sorted_ids=False
):
"""Computes how often integer targets are in the top `K` predictions.
Args:
y_true: A tensor of shape `(batch_size)` representing indices or IDs of
true categories.
y_pred: If `from_sorted_ids=False`, a tensor of shape
`(batch_size, num_categories)` containing the scores for each sample
for all possible categories. If `from_sorted_ids=True`, a tensor of
shape `(batch_size, N)` containing indices or IDs of the top `N`
categories in order from highest score to lowest score.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
from_sorted_ids: (Optional) Whether `y_pred` is sorted category IDs or
scores for all categories (the default).
Returns:
A tensor with the same shape as `y_true` containing ones where `y_true`
is in the top `k` and zeros elsewhere.
"""
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true_dtype = y_pred.dtype if from_sorted_ids else "int32"
y_true = ops.convert_to_tensor(y_true, dtype=y_true_dtype)
y_true_rank = len(y_true.shape)
y_pred_rank = len(y_pred.shape)
y_true_org_shape = ops.shape(y_true)
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = ops.reshape(y_true, [-1])
if from_sorted_ids:
# By slicing the first k items, we assume they are sorted by score.
# Reduce with `any` to count multiple matches only once.
matches = ops.any(
ops.equal(ops.expand_dims(y_true, axis=1), y_pred[:, :k]), axis=1
)
else:
matches = ops.in_top_k(y_true, y_pred, k=k)
matches = ops.cast(matches, dtype=backend.floatx())
# returned matches is expected to have same shape as y_true input
if reshape_matches:
matches = ops.reshape(matches, y_true_org_shape)
return matches
@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
By default, the arguments expected by `update_state()` are:
- `y_true`: a tensor of shape `(batch_size)` representing indices of true
categories.
- `y_pred`: a tensor of shape `(batch_size, num_categories)` containing the
scores for each sample for all possible categories.
With `from_sorted_ids=True`, the arguments expected by `update_state` are:
- `y_true`: a tensor of shape `(batch_size)` representing indices or IDs of
true categories.
- `y_pred`: a tensor of shape `(batch_size, N)` containing the indices or
IDs of the top `N` categories sorted in order from highest score to
lowest score. `N` must be greater or equal to `k`.
The `from_sorted_ids=True` option can be more efficient when the set of
categories is very large and the model has an optimized way to retrieve the
top ones either without scoring or without maintaining the scores for all
the possible categories.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_sorted_ids: (Optional) When `False`, the default, the tensor passed
in `y_pred` contains the unsorted scores of all possible categories.
When `True`, `y_pred` contains a the indices or IDs for the top
categories.
Example:
>>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
>>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1,
... from_sorted_ids=True)
>>> m.update_state([2, 1], [[1, 0, 3], [1, 2, 3]])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=[keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(
self,
k=5,
name="sparse_top_k_categorical_accuracy",
dtype=None,
from_sorted_ids=False,
):
super().__init__(
fn=sparse_top_k_categorical_accuracy,
name=name,
dtype=dtype,
k=k,
from_sorted_ids=from_sorted_ids,
)
self.k = k
self.from_sorted_ids = from_sorted_ids
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
config = {"name": self.name, "dtype": self.dtype, "k": self.k}
if self.from_sorted_ids:
config["from_sorted_ids"] = True
return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/hinge_metrics.py | keras/src/metrics/hinge_metrics.py | from keras.src.api_export import keras_export
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import hinge
from keras.src.losses.losses import squared_hinge
from keras.src.metrics import reduction_metrics
@keras_export("keras.metrics.Hinge")
class Hinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.3
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.1
"""
def __init__(self, name="hinge", dtype=None):
super().__init__(fn=hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.SquaredHinge")
class SquaredHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.86
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.46
"""
def __init__(self, name="squared_hinge", dtype=None):
super().__init__(fn=squared_hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.CategoricalHinge")
class CategoricalHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.2
"""
def __init__(self, name="categorical_hinge", dtype=None):
super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/iou_metrics_test.py | keras/src/metrics/iou_metrics_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.metrics import iou_metrics as metrics
from keras.src.ops import convert_to_tensor
class IoUTest(testing.TestCase):
    def test_config(self):
        """Constructor args must round-trip through get_config()."""
        metric = metrics.IoU(
            num_classes=2, target_class_ids=[1, 0], name="iou_class_1_0"
        )
        self.assertEqual(metric.name, "iou_class_1_0")
        self.assertEqual(metric.num_classes, 2)
        self.assertEqual(metric.target_class_ids, [1, 0])

        restored = metrics.IoU.from_config(metric.get_config())
        self.assertEqual(restored.name, "iou_class_1_0")
        self.assertEqual(restored.num_classes, 2)
        self.assertEqual(restored.target_class_ids, [1, 0])

    def test_unweighted(self):
        y_pred = [0, 1, 0, 1]
        y_true = [0, 0, 1, 1]

        metric = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
        value = metric(y_true, y_pred)

        # Confusion matrix:
        #   [[1, 1],
        #    [1, 1]]
        # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
        self.assertAllClose(value, expected, atol=1e-3)

    def test_weighted(self):
        y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
        y_true = np.array([0, 0, 1, 1])
        sample_weight = np.array([0.2, 0.3, 0.4, 0.1])

        metric = metrics.IoU(
            num_classes=2, target_class_ids=[1, 0], dtype="float32"
        )
        value = metric(y_true, y_pred, sample_weight=sample_weight)

        # Confusion matrix:
        #   [[0.2, 0.3],
        #    [0.4, 0.1]]
        # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
        # true_positives = [0.2, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (
            0.1 / (0.4 + 0.5 - 0.1) + 0.2 / (0.6 + 0.5 - 0.2)
        ) / 2
        self.assertAllClose(value, expected, atol=1e-3)

    def test_multi_dim_input(self):
        y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
        y_true = np.array([[0, 0], [1, 1]])
        sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])

        metric = metrics.IoU(
            num_classes=2, target_class_ids=[0, 1], dtype="float32"
        )
        value = metric(y_true, y_pred, sample_weight=sample_weight)

        # Confusion matrix:
        #   [[0.2, 0.3],
        #    [0.4, 0.1]]
        # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
        # true_positives = [0.2, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (
            0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
        ) / 2
        self.assertAllClose(value, expected, atol=1e-3)

    def test_zero_valid_entries(self):
        # An untouched metric must report 0 rather than NaN.
        metric = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
        self.assertAllClose(metric.result(), 0, atol=1e-3)

    def test_zero_and_non_zero_entries(self):
        y_pred = np.array([1], dtype=np.float32)
        y_true = np.array([1])

        metric = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
        value = metric(y_true, y_pred)

        # Confusion matrix:
        #   [[0, 0],
        #    [0, 1]]
        # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
        # Class 0 never appears, so only class 1 contributes to the mean.
        expected = (1 / (1 + 1 - 1)) / 1
        self.assertAllClose(value, expected, atol=1e-3)

    @pytest.mark.requires_trainable_backend
    def test_compilation(self):
        """The metric must be usable end-to-end via compile()/fit()."""
        metric = metrics.MeanIoU(num_classes=2, ignore_class=0)
        model = models.Sequential([layers.Dense(2, activation="softmax")])
        model.compile(optimizer="rmsprop", loss="mse", metrics=[metric])
        model.fit(np.array([[1.0, 1.0]]), np.array([[1.0, 0.0]]))
class BinaryIoUTest(testing.TestCase):
    def test_config(self):
        """Constructor args must round-trip through get_config()."""
        obj = metrics.BinaryIoU(
            target_class_ids=[1, 0], threshold=0.1, name="iou_class_1_0"
        )
        self.assertEqual(obj.name, "iou_class_1_0")
        self.assertAlmostEqual(obj.threshold, 0.1)
        self.assertEqual(obj.target_class_ids, [1, 0])

        obj2 = metrics.BinaryIoU.from_config(obj.get_config())
        # Bug fix: assert on the *restored* object (obj2), not the
        # original, otherwise the round-trip is never verified.
        self.assertEqual(obj2.name, "iou_class_1_0")
        self.assertAlmostEqual(obj2.threshold, 0.1)
        self.assertEqual(obj2.target_class_ids, [1, 0])

    def test_different_thresholds_weighted(self):
        y_true = [0, 1, 0, 1]
        y_pred = [0.1, 0.2, 0.4, 0.7]

        sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
        # with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
        # cm = [[0.2, 0.4],
        #       [0.3, 0.1]]
        # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
        # true_positives = [0.2, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected_result = (
            0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
        ) / 2
        obj = metrics.BinaryIoU(
            target_class_ids=[0, 1], threshold=0.3, dtype="float32"
        )
        result = obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(result, expected_result, atol=1e-3)

        sample_weight = np.array([0.1, 0.2, 0.4, 0.3])
        # with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
        # cm = [[0.1+0.4, 0],
        #       [0.2, 0.3]]
        # sum_row = [0.5, 0.5], sum_col = [0.7, 0.3],
        # true_positives = [0.5, 0.3]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected_result = (
            0.5 / (0.5 + 0.7 - 0.5) + 0.3 / (0.5 + 0.3 - 0.3)
        ) / 2
        obj = metrics.BinaryIoU(
            target_class_ids=[0, 1], threshold=0.5, dtype="float32"
        )
        result = obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_different_thresholds_unweighted(self):
        y_true = [0, 1, 0, 1]
        y_pred = [0.1, 0.2, 0.4, 0.7]

        # with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
        # cm = [[1, 1],
        #       [1, 1]]
        # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
        obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
        result = obj(y_true, y_pred)
        self.assertAllClose(result, expected_result, atol=1e-3)

        # with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
        # cm = [[2, 0],
        #       [1, 1]]
        # sum_row = [2, 2], sum_col = [3, 1], true_positives = [2, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected_result = (2 / (2 + 3 - 2) + 1 / (2 + 1 - 1)) / 2
        obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
        result = obj(y_true, y_pred)
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_multi_dim_input(self):
        y_true = np.array([[0, 1], [0, 1]], dtype=np.float32)
        y_pred = np.array([[0.1, 0.7], [0.9, 0.3]])
        threshold = 0.4  # y_pred will become [[0, 1], [1, 0]]
        sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
        # cm = [[0.2, 0.4],
        #       [0.1, 0.3]]
        # sum_row = [0.6, 0.4], sum_col = [0.3, 0.7],
        # true_positives = [0.2, 0.3]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected_result = (
            0.2 / (0.6 + 0.3 - 0.2) + 0.3 / (0.4 + 0.7 - 0.3)
        ) / 2
        obj = metrics.BinaryIoU(
            target_class_ids=[0, 1], threshold=threshold, dtype="float32"
        )
        result = obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_zero_valid_entries(self):
        # An untouched metric must report 0 rather than NaN.
        obj = metrics.BinaryIoU(target_class_ids=[0, 1])
        self.assertAllClose(obj.result(), 0, atol=1e-3)

    def test_zero_and_non_zero_entries(self):
        y_pred = np.array([0.6], dtype=np.float32)
        threshold = 0.5
        y_true = np.array([1])

        obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
        result = obj(y_true, y_pred)

        # cm = [[0, 0],
        #       [0, 1]]
        # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
        # Class 0 never appears, so only class 1 contributes to the mean.
        expected_result = 1 / (1 + 1 - 1)
        self.assertAllClose(result, expected_result, atol=1e-3)
class MeanIoUTest(testing.TestCase):
    def test_config(self):
        # Constructor args must round-trip through get_config().
        m_obj = metrics.MeanIoU(num_classes=2, name="mean_iou")
        self.assertEqual(m_obj.name, "mean_iou")
        self.assertEqual(m_obj.num_classes, 2)

        m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
        self.assertEqual(m_obj2.name, "mean_iou")
        self.assertEqual(m_obj2.num_classes, 2)

    def test_unweighted(self):
        y_pred = [0, 1, 0, 1]
        y_true = [0, 0, 1, 1]

        m_obj = metrics.MeanIoU(num_classes=2)
        result = m_obj(y_true, y_pred)

        # cm = [[1, 1],
        #       [1, 1]]
        # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_unweighted_ignore_class_255(self):
        # Samples labeled with the ignore_class are excluded entirely.
        y_pred = [0, 1, 1, 1]
        y_true = [0, 1, 2, 255]

        m_obj = metrics.MeanIoU(num_classes=3, ignore_class=255)
        result = m_obj(y_true, y_pred)

        # cm = [[1, 0, 0],
        #       [0, 1, 0],
        #       [0, 1, 0]]
        # sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (
            1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
        ) / 3
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_unweighted_ignore_class_1(self):
        # Negative ignore_class values are supported as well.
        y_pred = [0, 1, 1, 1]
        y_true = [0, 1, 2, -1]

        m_obj = metrics.MeanIoU(num_classes=3, ignore_class=-1)
        result = m_obj(y_true, y_pred)

        # cm = [[1, 0, 0],
        #       [0, 1, 0],
        #       [0, 1, 0]]
        # sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (
            1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
        ) / 3
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_weighted(self):
        y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
        y_true = np.array([0, 0, 1, 1])
        sample_weight = np.array([0.2, 0.3, 0.4, 0.1])

        m_obj = metrics.MeanIoU(num_classes=2, dtype="float32")
        result = m_obj(y_true, y_pred, sample_weight=sample_weight)

        # cm = [[0.2, 0.3],
        #       [0.4, 0.1]]
        # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
        # 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (
            0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
        ) / 2
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_weighted_ignore_class_1(self):
        y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
        y_true = np.array([0, 0, 1, -1])
        sample_weight = np.array([0.2, 0.3, 0.4, 0.1])

        m_obj = metrics.MeanIoU(num_classes=2, ignore_class=-1, dtype="float32")
        result = m_obj(y_true, y_pred, sample_weight=sample_weight)

        # The last sample (label -1) and its weight are dropped.
        # cm = [[0.2, 0.3],
        #       [0.4, 0.0]]
        # sum_row = [0.6, 0.3], sum_col = [0.5, 0.4], true_positives = [0.2,
        # 0.0]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (
            0.2 / (0.6 + 0.5 - 0.2) + 0.0 / (0.3 + 0.4 - 0.0)
        ) / 2
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_multi_dim_input(self):
        y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
        y_true = np.array([[0, 0], [1, 1]])
        sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])

        m_obj = metrics.MeanIoU(num_classes=2, dtype="float32")
        result = m_obj(y_true, y_pred, sample_weight=sample_weight)

        # cm = [[0.2, 0.3],
        #       [0.4, 0.1]]
        # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
        # 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (
            0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
        ) / 2
        self.assertAllClose(result, expected_result, atol=1e-3)

    def test_zero_valid_entries(self):
        # An untouched metric must report 0 rather than NaN.
        m_obj = metrics.MeanIoU(num_classes=2)
        self.assertAllClose(m_obj.result(), 0, atol=1e-3)

    def test_zero_and_non_zero_entries(self):
        y_pred = np.array([1], dtype=np.float32)
        y_true = np.array([1])

        m_obj = metrics.MeanIoU(num_classes=2)
        result = m_obj(y_true, y_pred)

        # cm = [[0, 0],
        #       [0, 1]]
        # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives))
        expected_result = (0 + 1 / (1 + 1 - 1)) / 1
        self.assertAllClose(result, expected_result, atol=1e-3)

    @staticmethod
    def _confusion_matrix(y_true, y_pred, num_classes):
        """
        Creates a confusion matrix as a numpy array using vectorized operations.

        Parameters:
        - y_true: array-like, true class labels.
        - y_pred: array-like, predicted class labels.
        - num_classes: int, number of classes.

        Returns:
        - conf_matrix: np.ndarray, confusion matrix of shape (num_classes,
            num_classes).
        """
        # Map pairs of (y_true, y_pred) to indices in the confusion matrix
        indices = y_true * num_classes + y_pred
        # Count occurrences of each index
        conf_matrix = np.bincount(indices, minlength=num_classes * num_classes)
        # Reshape the flat array into a 2D confusion matrix
        conf_matrix = conf_matrix.reshape((num_classes, num_classes))
        return conf_matrix

    @staticmethod
    def _get_big_chunk(dtype):
        # Builds a seeded random segmentation problem plus a MeanIoU metric
        # whose internal confusion matrix is pre-seeded with large counts,
        # to exercise accumulation near integer/float precision limits.
        np.random.seed(14)
        all_y_true = np.random.choice([0, 1, 2], size=(10, 530, 530))
        # Generate random probabilities for each channel
        random_probs = np.random.rand(10, 530, 530, 3)
        # Normalize to ensure the last dimension sums to 1
        all_y_pred = random_probs / random_probs.sum(axis=-1, keepdims=True)
        # Convert predictions to class indices
        all_y_pred_arg = np.argmax(all_y_pred, axis=-1)
        mean_iou_metric = metrics.MeanIoU(num_classes=3, dtype=dtype)
        conf_matrix_start_point = np.array(
            [
                [18729664, 18728760, 18731196],
                [18727297, 18726105, 18728071],
                [18727917, 18717835, 18723155],
            ]
        )
        # NOTE(review): replaces the metric's internal state variable
        # directly; assumes the attribute name `total_cm` stays in sync
        # with the MeanIoU implementation — verify if this test breaks.
        mean_iou_metric.total_cm = mean_iou_metric.add_variable(
            name="total_confusion_matrix",
            shape=(3, 3),
            initializer=convert_to_tensor(conf_matrix_start_point),
            dtype=dtype or "int",
        )
        mean_iou_metric.update_state(all_y_true, all_y_pred_arg)
        tmp_true = np.reshape(all_y_true, -1)
        tmp_pred = np.reshape(all_y_pred_arg, -1)
        return (
            all_y_true,
            all_y_pred_arg,
            mean_iou_metric,
            tmp_true,
            tmp_pred,
            conf_matrix_start_point,
        )

    def test_big_chunk(self):
        # Init. process with dtype=None which will default to int
        (
            all_y_true,
            all_y_pred_arg,
            mean_iou_metric_all,
            tmp_true,
            tmp_pred,
            conf_matrix_start_point,
        ) = self._get_big_chunk(dtype=None)
        conf_matrix_from_keras = np.array(mean_iou_metric_all.total_cm)

        # Validate confusion matrices and results
        conf_matrix_manual = (
            self._confusion_matrix(tmp_true, tmp_pred, 3)
            + conf_matrix_start_point
        )
        self.assertTrue(
            np.array_equal(conf_matrix_from_keras, conf_matrix_manual),
            msg="Confusion matrices do not match!",
        )

        # Now same but with float32 dtype, in here the confusion matrix
        # should not match. Likely this can be removed
        # (float32 accumulation loses precision at these magnitudes, so the
        # counts are expected to diverge from the exact integer matrix).
        (
            all_y_true,
            all_y_pred_arg,
            mean_iou_metric_all,
            tmp_true,
            tmp_pred,
            conf_matrix_start_point,
        ) = self._get_big_chunk(dtype="float32")
        conf_matrix_from_keras = np.array(mean_iou_metric_all.total_cm)

        # Validate confusion matrices and results
        conf_matrix_manual = (
            self._confusion_matrix(tmp_true, tmp_pred, 3)
            + conf_matrix_start_point
        )
        self.assertFalse(
            np.array_equal(conf_matrix_from_keras, conf_matrix_manual),
            msg="Confusion matrices match, but they should not!",
        )

    def test_user_warning_float_weight(self):
        # Float sample weights with an int-accumulating metric should warn
        # about the lossy cast.
        y_pred = [0, 1, 1, 1]
        y_true = [0, 1, 1, 0]

        m_obj = metrics.MeanIoU(num_classes=3)
        with pytest.warns(Warning, match=r"weight.*float.*int.*casting"):
            m_obj(y_true, y_pred, sample_weight=np.array([0.2, 0.3, 0.4, 0.1]))
class OneHotIoUTest(testing.TestCase):
    def test_unweighted(self):
        y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
        # y_true will be converted to [2, 0, 1, 0]
        y_pred = np.array(
            [[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
        )
        # y_pred will be converted to [2, 2, 0, 2]

        metric = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
        value = metric(y_true, y_pred)

        # Confusion matrix:
        #   [[0, 0, 2],
        #    [1, 0, 0],
        #    [0, 0, 1]]
        # sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (0 / (1 + 2 - 0) + 1 / (3 + 1 - 1)) / 2
        self.assertAllClose(value, expected, atol=1e-3)

    def test_weighted(self):
        y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
        # y_true will be converted to [2, 0, 1, 0]
        y_pred = np.array(
            [[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
        )
        # y_pred will be converted to [2, 2, 0, 2]
        sample_weight = [0.1, 0.2, 0.3, 0.4]

        metric = metrics.OneHotIoU(
            num_classes=3, target_class_ids=[0, 2], dtype="float32"
        )
        value = metric(y_true, y_pred, sample_weight=sample_weight)

        # Weighted confusion matrix:
        #   [[0, 0, 0.2+0.4],
        #    [0.3, 0, 0],
        #    [0, 0, 0.1]]
        # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
        # true_positives = [0, 0, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
        self.assertAllClose(value, expected, atol=1e-3)
class OneHotMeanIoUTest(testing.TestCase):
    def test_unweighted(self):
        y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
        # y_true will be converted to [2, 0, 1, 0]
        y_pred = np.array(
            [[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
        )
        # y_pred will be converted to [2, 2, 0, 2]

        metric = metrics.OneHotMeanIoU(num_classes=3)
        value = metric(y_true, y_pred)

        # Confusion matrix:
        #   [[0, 0, 2],
        #    [1, 0, 0],
        #    [0, 0, 1]]
        # sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (0 + 0 + 1 / (3 + 1 - 1)) / 3
        self.assertAllClose(value, expected, atol=1e-3)

    def test_weighted(self):
        y_true = np.array(
            [
                [0, 0, 1],
                [1, 0, 0],
                [0, 1, 0],
                [1, 0, 0],
                [1, 0, 0],
            ]
        )
        # y_true will be converted to [2, 0, 1, 0, 0]
        y_pred = np.array(
            [
                [0.2, 0.3, 0.5],
                [0.1, 0.2, 0.7],
                [0.5, 0.3, 0.1],
                [0.1, 0.4, 0.5],
                [0.6, 0.2, 0.2],
            ]
        )
        # y_pred will be converted to [2, 2, 0, 2, 0]
        sample_weight = [0.1, 0.2, 0.3, 0.3, 0.1]

        metric = metrics.OneHotMeanIoU(num_classes=3, dtype="float32")
        value = metric(y_true, y_pred, sample_weight=sample_weight)

        # Weighted confusion matrix:
        #   [[0.1, 0, 0.2+0.3],
        #    [0.3, 0, 0],
        #    [0, 0, 0.1]]
        # sum_row = [0.4, 0, 0.6], sum_col = [0.6, 0.3, 0.1]
        # true_positives = [0.1, 0, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
        expected = (
            0.1 / (0.4 + 0.6 - 0.1) + 0 + 0.1 / (0.6 + 0.1 - 0.1)
        ) / 3
        self.assertAllClose(value, expected, atol=1e-3)

        # Integer weights proportional to the float ones must give the
        # same normalized result.
        int_weights = [1, 2, 3, 3, 1]
        int_metric = metrics.OneHotMeanIoU(num_classes=3)
        int_value = int_metric(y_true, y_pred, sample_weight=int_weights)
        self.assertAllClose(int_value, expected, atol=1e-3)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/regression_metrics_test.py | keras/src/metrics/regression_metrics_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.metrics import regression_metrics as metrics
class MeanSquaredErrorTest(testing.TestCase):
    def test_config(self):
        """Constructor args must round-trip through get_config().

        Previously an empty TODO; implemented following the pattern used
        by the other regression metric tests in this file.
        """
        mse_obj = metrics.MeanSquaredError(name="my_mse", dtype="int32")
        self.assertEqual(mse_obj.name, "my_mse")
        self.assertEqual(mse_obj._dtype, "int32")

        # Check save and restore config
        mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
        self.assertEqual(mse_obj2.name, "my_mse")
        self.assertEqual(mse_obj2._dtype, "int32")

    def test_unweighted(self):
        mse_obj = metrics.MeanSquaredError()
        y_true = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        y_pred = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )

        mse_obj.update_state(y_true, y_pred)
        result = mse_obj.result()
        self.assertAllClose(0.5, result, atol=1e-5)

    def test_weighted(self):
        mse_obj = metrics.MeanSquaredError()
        y_true = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        y_pred = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
        result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(0.54285, result, atol=1e-5)
class CosineSimilarityTest(testing.TestCase):
    def l2_norm(self, x, axis):
        # Normalize x to unit L2 norm along `axis`, guarding against
        # division by zero with a small epsilon.
        eps = 1e-12
        sq_sum = np.sum(np.square(x), axis=axis, keepdims=True)
        inv_norm = 1 / np.sqrt(np.maximum(sq_sum, eps))
        return x * inv_norm

    def setup(self, axis=1):
        # Reference computation: cosine similarity is the dot product of
        # the L2-normalized inputs along `axis`.
        self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
        self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)

        normed_true = self.l2_norm(self.np_y_true, axis)
        normed_pred = self.l2_norm(self.np_y_pred, axis)
        self.expected_loss = np.sum(
            np.multiply(normed_true, normed_pred), axis=(axis,)
        )

        self.y_true = self.np_y_true
        self.y_pred = self.np_y_pred

    def test_config(self):
        metric = metrics.CosineSimilarity(axis=2, name="my_cos", dtype="int32")
        self.assertEqual(metric.name, "my_cos")
        self.assertEqual(metric.dtype, "int32")

        # Check save and restore config
        restored = metrics.CosineSimilarity.from_config(metric.get_config())
        self.assertEqual(restored.name, "my_cos")
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        self.setup()
        metric = metrics.CosineSimilarity()
        loss = metric(self.y_true, self.y_pred)
        self.assertAlmostEqual(loss, np.mean(self.expected_loss), 3)

    def test_weighted(self):
        self.setup()
        metric = metrics.CosineSimilarity()
        sample_weight = np.asarray([1.2, 3.4])
        loss = metric(self.y_true, self.y_pred, sample_weight=sample_weight)
        weighted_mean = np.sum(self.expected_loss * sample_weight) / np.sum(
            sample_weight
        )
        self.assertAlmostEqual(loss, weighted_mean, 3)

    def test_axis(self):
        self.setup(axis=1)
        metric = metrics.CosineSimilarity(axis=1)
        loss = metric(self.y_true, self.y_pred)
        self.assertAlmostEqual(loss, np.mean(self.expected_loss), 3)
class MeanAbsoluteErrorTest(testing.TestCase):
    def test_config(self):
        """Constructor args must round-trip through get_config()."""
        metric = metrics.MeanAbsoluteError(name="my_mae", dtype="int32")
        self.assertEqual(metric.name, "my_mae")
        self.assertEqual(metric._dtype, "int32")

        restored = metrics.MeanAbsoluteError.from_config(metric.get_config())
        self.assertEqual(restored.name, "my_mae")
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        metric = metrics.MeanAbsoluteError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )

        metric.update_state(labels, preds)
        self.assertAllClose(0.5, metric.result(), atol=1e-5)

    def test_weighted(self):
        metric = metrics.MeanAbsoluteError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
        value = metric(labels, preds, sample_weight=sample_weight)
        self.assertAllClose(0.54285, value, atol=1e-5)
class MeanAbsolutePercentageErrorTest(testing.TestCase):
    def test_config(self):
        """Constructor args must round-trip through get_config()."""
        metric = metrics.MeanAbsolutePercentageError(
            name="my_mape", dtype="int32"
        )
        self.assertEqual(metric.name, "my_mape")
        self.assertEqual(metric._dtype, "int32")

        restored = metrics.MeanAbsolutePercentageError.from_config(
            metric.get_config()
        )
        self.assertEqual(restored.name, "my_mape")
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        metric = metrics.MeanAbsolutePercentageError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [
                [0, 0, 1, 1, 0],
                [1, 1, 1, 1, 1],
                [0, 1, 0, 1, 0],
                [1, 1, 1, 1, 1],
            ],
            dtype="float32",
        )

        # Zero targets drive MAPE toward the clamped huge value, hence the
        # very large expectation.
        value = metric(labels, preds)
        self.assertAllClose(35e7, value, atol=1e-5)

    def test_weighted(self):
        metric = metrics.MeanAbsolutePercentageError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [
                [0, 0, 1, 1, 0],
                [1, 1, 1, 1, 1],
                [0, 1, 0, 1, 0],
                [1, 1, 1, 1, 1],
            ],
            dtype="float32",
        )
        sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
        value = metric(labels, preds, sample_weight=sample_weight)
        self.assertAllClose(40e7, value, atol=1e-5)
class MeanSquaredLogarithmicErrorTest(testing.TestCase):
    """Tests for the `MeanSquaredLogarithmicError` metric."""

    def test_config(self):
        """`name` and `dtype` survive a get_config/from_config round trip."""
        metric = metrics.MeanSquaredLogarithmicError(
            name="my_msle", dtype="int32"
        )
        self.assertEqual(metric.name, "my_msle")
        self.assertEqual(metric._dtype, "int32")
        restored = metrics.MeanSquaredLogarithmicError.from_config(
            metric.get_config()
        )
        self.assertEqual(restored.name, "my_msle")
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        """Unweighted MSLE over the binary fixtures."""
        metric = metrics.MeanSquaredLogarithmicError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        metric.update_state(labels, preds)
        self.assertAllClose(0.24022, metric.result(), atol=1e-5)

    def test_weighted(self):
        """Per-sample weights rescale the aggregate MSLE."""
        metric = metrics.MeanSquaredLogarithmicError()
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        weights = np.array([1.0, 1.5, 2.0, 2.5])
        result = metric(labels, preds, sample_weight=weights)
        self.assertAllClose(0.26082, result, atol=1e-5)
class RootMeanSquaredErrorTest(testing.TestCase):
    """Tests for the `RootMeanSquaredError` metric."""

    def test_config(self):
        """`name` and `dtype` survive a get_config/from_config round trip."""
        rmse_obj = metrics.RootMeanSquaredError(name="rmse", dtype="int32")
        self.assertEqual(rmse_obj.name, "rmse")
        self.assertEqual(rmse_obj._dtype, "int32")
        rmse_obj2 = metrics.RootMeanSquaredError.from_config(
            rmse_obj.get_config()
        )
        self.assertEqual(rmse_obj2.name, "rmse")
        self.assertEqual(rmse_obj2._dtype, "int32")

    def test_unweighted(self):
        """RMSE is the square root of the mean squared error."""
        rmse_obj = metrics.RootMeanSquaredError()
        y_true = np.array([2, 4, 6])
        y_pred = np.array([1, 3, 2])
        rmse_obj.update_state(y_true, y_pred)
        result = rmse_obj.result()
        # error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
        self.assertAllClose(np.sqrt(6), result, atol=1e-3)

    def test_weighted(self):
        """Zero weights exclude samples from the weighted mean."""
        rmse_obj = metrics.RootMeanSquaredError()
        # Fix: removed throwaway 3-element `y_true`/`y_pred` assignments
        # that were immediately overwritten; only this data is used.
        y_true = np.array([2, 4, 6, 8])
        y_pred = np.array([1, 3, 2, 3])
        sample_weight = np.array([0, 1, 0, 1])
        result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
        # Weighted squared errors: 1 and 25 kept, mean = 26/2 = 13.
        self.assertAllClose(np.sqrt(13), result, atol=1e-3)
class LogCoshErrorTest(testing.TestCase):
    """Tests for the `LogCoshError` metric."""

    def setup(self):
        """Precompute element-wise log-cosh values for the fixtures."""
        y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
        y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
        self.batch_size = 6
        diff = y_pred - y_true
        # log(cosh(x)) spelled out as log((e^x + e^-x) / 2).
        self.expected_results = np.log((np.exp(diff) + np.exp(-diff)) / 2)
        self.y_pred = y_pred
        self.y_true = y_true

    def test_config(self):
        """Constructor arguments are stored on the instance."""
        metric = metrics.LogCoshError(name="logcosh", dtype="int32")
        self.assertEqual(metric.name, "logcosh")
        self.assertEqual(metric._dtype, "int32")

    def test_unweighted(self):
        """Unweighted result is the summed log-cosh over the batch size."""
        self.setup()
        metric = metrics.LogCoshError()
        metric.update_state(self.y_true, self.y_pred)
        expected = np.sum(self.expected_results) / self.batch_size
        self.assertAllClose(metric.result(), expected, atol=1e-3)

    def test_weighted(self):
        """Row weights broadcast across columns and rescale the mean."""
        self.setup()
        metric = metrics.LogCoshError(dtype="float32")
        sample_weight = np.array([[1.2], [3.4]])
        result = metric(self.y_true, self.y_pred, sample_weight=sample_weight)
        full_weights = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(
            (2, 3)
        )
        weighted = np.multiply(self.expected_results, full_weights)
        expected = np.sum(weighted) / np.sum(full_weights)
        self.assertAllClose(result, expected, atol=1e-3)
class R2ScoreTest(testing.TestCase):
    """Tests for the `R2Score` metric (coefficient of determination)."""
    def _run_test(
        self,
        y_true,
        y_pred,
        sample_weights,
        class_aggregation,
        num_regressors,
        reference_result,
    ):
        """Build an `R2Score`, feed a single batch, compare to a reference."""
        r2 = metrics.R2Score(class_aggregation, num_regressors, dtype="float32")
        r2.update_state(y_true, y_pred, sample_weights)
        result = r2.result()
        self.assertAllClose(result, reference_result, atol=1e-6)
    def test_config(self):
        """Config round trip preserves aggregation, regressors and dtype."""
        r2_obj = metrics.R2Score(
            class_aggregation=None, num_regressors=2, dtype="float32"
        )
        self.assertEqual(r2_obj.class_aggregation, None)
        self.assertEqual(r2_obj.num_regressors, 2)
        self.assertEqual(r2_obj.dtype, "float32")
        # Check save and restore config
        r2_obj2 = metrics.R2Score.from_config(r2_obj.get_config())
        self.assertEqual(r2_obj2.class_aggregation, None)
        self.assertEqual(r2_obj2.num_regressors, 2)
        self.assertEqual(r2_obj2.dtype, "float32")
    @parameterized.parameters(
        # class_aggregation, num_regressors, result
        (None, 0, [0.37, -1.295, 0.565]),
        ("uniform_average", 0, -0.12),
        ("variance_weighted_average", 0, -0.12),
    )
    def test_r2_sklearn_comparison(
        self, class_aggregation, num_regressors, result
    ):
        """Unweighted cases; references presumably from scikit-learn
        (per the test name) — not recomputed here."""
        y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
        y_pred = [[0.4, 0.5, 0.6], [0.1, 0.2, 0.3], [0.5, 0.8, 0.2]]
        self._run_test(
            y_true,
            y_pred,
            None,
            class_aggregation=class_aggregation,
            num_regressors=num_regressors,
            reference_result=result,
        )
    @parameterized.parameters(
        # class_aggregation, num_regressors, result
        (None, 0, [0.17305559, -8.836666, -0.521]),
        (None, 1, [0.054920673, -10.241904, -0.7382858]),
        (None, 2, [-0.10259259, -12.115555, -1.0280001]),
        ("uniform_average", 0, -3.0615367889404297),
        ("uniform_average", 1, -3.641756534576416),
        ("uniform_average", 2, -4.415382385253906),
        ("variance_weighted_average", 0, -1.3710224628448486),
        ("variance_weighted_average", 1, -1.7097399234771729),
        ("variance_weighted_average", 2, -2.161363363265991),
    )
    def test_r2_tfa_comparison(self, class_aggregation, num_regressors, result):
        """Sample-weighted cases; references presumably from TF-Addons
        (per the test name) — not recomputed here."""
        y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
        y_pred = [[0.4, 0.9, 1.6], [0.1, 1.2, 0.6], [1.5, 0.8, 0.6]]
        sample_weights = [0.8, 0.1, 0.4]
        self._run_test(
            y_true,
            y_pred,
            sample_weights,
            class_aggregation=class_aggregation,
            num_regressors=num_regressors,
            reference_result=result,
        )
    def test_errors(self):
        """Invalid constructor arguments and input ranks raise ValueError."""
        # Bad class_aggregation value
        with self.assertRaisesRegex(
            ValueError, "Invalid value for argument `class_aggregation`"
        ):
            metrics.R2Score(class_aggregation="wrong")
        # Bad num_regressors value
        with self.assertRaisesRegex(
            ValueError, "Invalid value for argument `num_regressors`"
        ):
            metrics.R2Score(num_regressors=-1)
        # Bad input shape
        with self.assertRaisesRegex(ValueError, "expects 2D inputs with shape"):
            r2 = metrics.R2Score()
            r2.update_state([0.0, 1.0], [0.0, 1.0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/probabilistic_metrics.py | keras/src/metrics/probabilistic_metrics.py | from keras.src.api_export import keras_export
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.metrics import reduction_metrics
@keras_export("keras.metrics.KLDivergence")
class KLDivergence(reduction_metrics.MeanMetricWrapper):
    """Computes Kullback-Leibler divergence metric between `y_true` and
    `y_pred`.

    Formula:

    ```python
    metric = y_true * log(y_true / y_pred)
    ```

    `y_true` and `y_pred` are expected to be probability
    distributions, with values between 0 and 1. They will get
    clipped to the `[0, 1]` range.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.KLDivergence()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result()
    0.45814306

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.9162892

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.KLDivergence()])
    ```
    """

    def __init__(self, name="kl_divergence", dtype=None):
        super().__init__(fn=kl_divergence, name=name, dtype=dtype)
        # Metric should be minimized during optimization; set explicitly for
        # consistency with the other loss-wrapping metrics in this module
        # (e.g. `BinaryCrossentropy`).
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.Poisson")
class Poisson(reduction_metrics.MeanMetricWrapper):
    """Computes the Poisson metric between `y_true` and `y_pred`.

    Formula:

    ```python
    metric = y_pred - y_true * log(y_pred)
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.Poisson()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.99999994

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.Poisson()])
    ```
    """

    def __init__(self, name="poisson", dtype=None):
        super().__init__(fn=poisson, name=name, dtype=dtype)
        # Metric should be minimized during optimization; set explicitly for
        # consistency with the other loss-wrapping metrics in this module
        # (e.g. `BinaryCrossentropy`).
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.BinaryCrossentropy")
class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric class when there are exactly two label
    classes (0 and 1).

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected
            to be a logits tensor. By default, we consider
            that output encodes a probability distribution.
        label_smoothing: (Optional) Float in `[0, 1]`.
            When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed.
            e.g. `label_smoothing=0.2` means that we will use
            a value of 0.1 for label "0" and 0.9 for label "1".

    Examples:

    >>> m = keras.metrics.BinaryCrossentropy()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result()
    0.81492424

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.9162905

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.BinaryCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="binary_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
    ):
        super().__init__(
            fn=binary_crossentropy,
            name=name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
        )
        # Lower is better: this metric is minimized during optimization.
        self._direction = "down"
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing

    def get_config(self):
        return dict(
            name=self.name,
            dtype=self.dtype,
            from_logits=self.from_logits,
            label_smoothing=self.label_smoothing,
        )
@keras_export("keras.metrics.CategoricalCrossentropy")
class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric class when there are multiple label classes
    (2 or more). It assumes labels are one-hot encoded, e.g., when the label
    values are `[2, 0, 1]`, then
    `y_true` is `[[0, 0, 1], [1, 0, 0], [0, 1, 0]]`.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected to be
            a logits tensor. By default, we consider that output
            encodes a probability distribution.
        label_smoothing: (Optional) Float in `[0, 1]`.
            When > 0, label values are smoothed, meaning the confidence
            on label values are relaxed. e.g. `label_smoothing=0.2` means
            that we will use a value of 0.1 for label
            "0" and 0.9 for label "1".
        axis: (Optional) Defaults to `-1`.
            The dimension along which entropy is computed.

    Examples:

    >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
    >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(y'), axis = -1)
    >>> #      = -((log 0.95), (log 0.1))
    >>> #      = [0.051, 2.302]
    >>> # Reduced xent = (0.051 + 2.302) / 2
    >>> m = keras.metrics.CategoricalCrossentropy()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=np.array([0.3, 0.7]))
    >>> m.result()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.CategoricalCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="categorical_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
        axis=-1,
    ):
        super().__init__(
            fn=categorical_crossentropy,
            name=name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        # Lower is better: this metric is minimized during optimization.
        self._direction = "down"
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis

    def get_config(self):
        return dict(
            name=self.name,
            dtype=self.dtype,
            from_logits=self.from_logits,
            label_smoothing=self.label_smoothing,
            axis=self.axis,
        )
@keras_export("keras.metrics.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric when there are two or more label classes
    and the labels are provided as integers. If your labels are one-hot
    encoded, use the `CategoricalCrossentropy` metric instead.

    There should be `num_classes` floating point values per feature for
    `y_pred` and a single floating point value per feature for `y_true`.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected
            to be a logits tensor. By default, we consider that output
            encodes a probability distribution.
        axis: (Optional) Defaults to `-1`.
            The dimension along which entropy is computed.

    Examples:

    >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    >>> # logits = log(y_pred)
    >>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
    >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(softmax), 1)
    >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    >>> #                [-2.3026, -0.2231, -2.3026]]
    >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    >>> # xent = [0.0513, 2.3026]
    >>> # Reduced xent = (0.0513 + 2.3026) / 2
    >>> m = keras.metrics.SparseCategoricalCrossentropy()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=np.array([0.3, 0.7]))
    >>> m.result()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.SparseCategoricalCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="sparse_categorical_crossentropy",
        dtype=None,
        from_logits=False,
        axis=-1,
    ):
        super().__init__(
            fn=sparse_categorical_crossentropy,
            name=name,
            dtype=dtype,
            from_logits=from_logits,
            axis=axis,
        )
        # Lower is better: this metric is minimized during optimization.
        self._direction = "down"
        self.from_logits = from_logits
        self.axis = axis

    def get_config(self):
        return dict(
            name=self.name,
            dtype=self.dtype,
            from_logits=self.from_logits,
            axis=self.axis,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/f_score_metrics.py | keras/src/metrics/f_score_metrics.py | from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics.metric import Metric
@keras_export("keras.metrics.FBetaScore")
class FBetaScore(Metric):
    """Computes F-Beta score.

    Formula:

    ```python
    b2 = beta ** 2
    f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall)
    ```

    This is the weighted harmonic mean of precision and recall.
    Its output range is `[0, 1]`. It works for both multi-class
    and multi-label classification.

    Args:
        average: Type of averaging to be performed across per-class results
            in the multi-class case.
            Acceptable values are `None`, `"micro"`, `"macro"` and
            `"weighted"`. Defaults to `None`.
            If `None`, no averaging is performed and `result()` will return
            the score for each class.
            If `"micro"`, compute metrics globally by counting the total
            true positives, false negatives and false positives.
            If `"macro"`, compute metrics for each label,
            and return their unweighted mean.
            This does not take label imbalance into account.
            If `"weighted"`, compute metrics for each label,
            and return their average weighted by support
            (the number of true instances for each label).
            This alters `"macro"` to account for label imbalance.
            It can result in a score that is not between precision and
            recall.
        beta: Determines the weight given to recall
            in the harmonic mean between precision and recall (see pseudocode
            equation above). Defaults to `1`.
        threshold: Elements of `y_pred` greater than `threshold` are
            converted to be 1, and the rest 0. If `threshold` is
            `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
        name: Optional. String name of the metric instance.
        dtype: Optional. Data type of the metric result.

    Returns:
        F-Beta Score: float.

    Example:

    >>> metric = keras.metrics.FBetaScore(beta=2.0, threshold=0.5)
    >>> y_true = np.array([[1, 1, 1],
    ...                    [1, 0, 0],
    ...                    [1, 1, 0]], np.int32)
    >>> y_pred = np.array([[0.2, 0.6, 0.7],
    ...                    [0.2, 0.6, 0.6],
    ...                    [0.6, 0.8, 0.0]], np.float32)
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result
    [0.3846154 , 0.90909094, 0.8333334 ]
    """
    def __init__(
        self,
        average=None,
        beta=1.0,
        threshold=None,
        name="fbeta_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"
        # Validate `average` before any state is created.
        if average not in (None, "micro", "macro", "weighted"):
            raise ValueError(
                "Invalid `average` argument value. Expected one of: "
                "{None, 'micro', 'macro', 'weighted'}. "
                f"Received: average={average}"
            )
        # NOTE(review): this rejects integer betas (e.g. `beta=1`); only a
        # Python float is accepted.
        if not isinstance(beta, float):
            raise ValueError(
                "Invalid `beta` argument value. "
                "It should be a Python float. "
                f"Received: beta={beta} of type '{type(beta)}'"
            )
        if beta <= 0.0:
            raise ValueError(
                "Invalid `beta` argument value. "
                "It should be > 0. "
                f"Received: beta={beta}"
            )
        # `threshold` must be a float in (0, 1], or None for argmax mode.
        if threshold is not None:
            if not isinstance(threshold, float):
                raise ValueError(
                    "Invalid `threshold` argument value. "
                    "It should be a Python float. "
                    f"Received: threshold={threshold} "
                    f"of type '{type(threshold)}'"
                )
            if threshold > 1.0 or threshold <= 0.0:
                raise ValueError(
                    "Invalid `threshold` argument value. "
                    "It should verify 0 < threshold <= 1. "
                    f"Received: threshold={threshold}"
                )
        self.average = average
        self.beta = beta
        self.threshold = threshold
        self.axis = None
        # State variables are created lazily in `_build` once the number of
        # classes is known from the first `update_state` call.
        self._built = False
        # With "micro" averaging the counts are pooled over classes and
        # batch (reduce over all axes, axis=None); otherwise counts are kept
        # per class by reducing over the batch axis only.
        if self.average != "micro":
            self.axis = 0
    def _build(self, y_true_shape, y_pred_shape):
        """Create zero-initialized count variables (per class unless micro).

        Raises:
            ValueError: if inputs are not 2D or the class dimension is
                not statically known.
        """
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "FBetaScore expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "FBetaScore expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        num_classes = y_pred_shape[-1]
        # Scalar state for "micro", one slot per class otherwise.
        if self.average != "micro":
            init_shape = (num_classes,)
        else:
            init_shape = ()
        def _add_zeros_variable(name):
            # Helper: zero-initialized state variable of the chosen shape.
            return self.add_variable(
                name=name,
                shape=init_shape,
                initializer=initializers.Zeros(),
                dtype=self.dtype,
            )
        self.true_positives = _add_zeros_variable("true_positives")
        self.false_positives = _add_zeros_variable("false_positives")
        self.false_negatives = _add_zeros_variable("false_negatives")
        # Running per-class support (sum of y_true), used by "weighted".
        self.intermediate_weights = _add_zeros_variable("intermediate_weights")
        self._built = True
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate confusion counts (TP/FP/FN and support) for a batch."""
        y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
        if not self._built:
            self._build(y_true.shape, y_pred.shape)
        if self.threshold is None:
            # Argmax mode: mark only the per-sample maximum as positive.
            threshold = ops.max(y_pred, axis=-1, keepdims=True)
            # make sure [0, 0, 0] doesn't become [1, 1, 1]
            # Use abs(x) > eps, instead of x != 0 to check for zero
            y_pred = ops.logical_and(
                y_pred >= threshold, ops.abs(y_pred) > 1e-9
            )
        else:
            y_pred = y_pred > self.threshold
        y_pred = ops.cast(y_pred, dtype=self.dtype)
        y_true = ops.cast(y_true, dtype=self.dtype)
        if sample_weight is not None:
            sample_weight = ops.convert_to_tensor(
                sample_weight, dtype=self.dtype
            )
        def _weighted_sum(val, sample_weight):
            # Optionally scale each row by its sample weight, then reduce
            # over `self.axis` (batch axis, or all axes for "micro").
            if sample_weight is not None:
                val = ops.multiply(val, ops.expand_dims(sample_weight, 1))
            return ops.sum(val, axis=self.axis)
        self.true_positives.assign(
            self.true_positives + _weighted_sum(y_pred * y_true, sample_weight)
        )
        self.false_positives.assign(
            self.false_positives
            + _weighted_sum(y_pred * (1 - y_true), sample_weight)
        )
        self.false_negatives.assign(
            self.false_negatives
            + _weighted_sum((1 - y_pred) * y_true, sample_weight)
        )
        self.intermediate_weights.assign(
            self.intermediate_weights + _weighted_sum(y_true, sample_weight)
        )
    def result(self):
        """Compute the F-beta score(s) from the accumulated counts."""
        # Epsilon guards against division by zero for classes with no
        # positive predictions or no positive labels.
        precision = ops.divide(
            self.true_positives,
            self.true_positives + self.false_positives + backend.epsilon(),
        )
        recall = ops.divide(
            self.true_positives,
            self.true_positives + self.false_negatives + backend.epsilon(),
        )
        precision = ops.convert_to_tensor(precision, dtype=self.dtype)
        recall = ops.convert_to_tensor(recall, dtype=self.dtype)
        # (1 + b^2) * p * r / (b^2 * p + r) — the formula in the docstring.
        mul_value = precision * recall
        add_value = ((self.beta**2) * precision) + recall
        mean = ops.divide(mul_value, add_value + backend.epsilon())
        f1_score = mean * (1 + (self.beta**2))
        if self.average == "weighted":
            # Weight each class by its (normalized) support.
            weights = ops.divide(
                self.intermediate_weights,
                ops.sum(self.intermediate_weights) + backend.epsilon(),
            )
            f1_score = ops.sum(f1_score * weights)
        elif self.average is not None:  # [micro, macro]
            f1_score = ops.mean(f1_score)
        return f1_score
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {
            "name": self.name,
            "dtype": self.dtype,
            "average": self.average,
            "beta": self.beta,
            "threshold": self.threshold,
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def reset_state(self):
        # Zero every state variable in place; shapes are preserved.
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))
@keras_export("keras.metrics.F1Score")
class F1Score(FBetaScore):
    r"""Computes F-1 Score.

    Formula:

    ```python
    f1_score = 2 * (precision * recall) / (precision + recall)
    ```

    This is the harmonic mean of precision and recall.
    Its output range is `[0, 1]`. It works for both multi-class
    and multi-label classification.

    Args:
        average: Type of averaging to be performed on data.
            Acceptable values are `None`, `"micro"`, `"macro"`
            and `"weighted"`. Defaults to `None`.
            If `None`, no averaging is performed and `result()` will return
            the score for each class.
            If `"micro"`, compute metrics globally by counting the total
            true positives, false negatives and false positives.
            If `"macro"`, compute metrics for each label,
            and return their unweighted mean.
            This does not take label imbalance into account.
            If `"weighted"`, compute metrics for each label,
            and return their average weighted by support
            (the number of true instances for each label).
            This alters `"macro"` to account for label imbalance.
            It can result in a score that is not between precision and
            recall.
        threshold: Elements of `y_pred` greater than `threshold` are
            converted to be 1, and the rest 0. If `threshold` is
            `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
        name: Optional. String name of the metric instance.
        dtype: Optional. Data type of the metric result.

    Returns:
        F-1 Score: float.

    Example:

    >>> metric = keras.metrics.F1Score(threshold=0.5)
    >>> y_true = np.array([[1, 1, 1],
    ...                    [1, 0, 0],
    ...                    [1, 1, 0]], np.int32)
    >>> y_pred = np.array([[0.2, 0.6, 0.7],
    ...                    [0.2, 0.6, 0.6],
    ...                    [0.6, 0.8, 0.0]], np.float32)
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    array([0.5      , 0.8      , 0.6666667], dtype=float32)
    """

    def __init__(
        self,
        average=None,
        threshold=None,
        name="f1_score",
        dtype=None,
    ):
        # F-1 is simply the F-beta score with beta fixed at 1.
        super().__init__(
            beta=1.0,
            average=average,
            threshold=threshold,
            name=name,
            dtype=dtype,
        )

    def get_config(self):
        # `beta` is fixed to 1 for F-1, so it is not part of the config.
        config = super().get_config()
        config.pop("beta")
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/confusion_metrics_test.py | keras/src/metrics/confusion_metrics_test.py | import json
import numpy as np
import pytest
from absl import logging
from absl.testing import parameterized
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.metrics import metrics_utils
class FalsePositivesTest(testing.TestCase):
    """Tests for the `FalsePositives` confusion-matrix metric."""

    def test_config(self):
        """Name and thresholds survive a get_config/from_config round trip."""
        metric = metrics.FalsePositives(name="my_fp", thresholds=[0.4, 0.9])
        self.assertEqual(metric.name, "my_fp")
        self.assertLen(metric.variables, 1)
        self.assertEqual(metric.thresholds, [0.4, 0.9])
        restored = metrics.FalsePositives.from_config(metric.get_config())
        self.assertEqual(restored.name, "my_fp")
        self.assertLen(restored.variables, 1)
        self.assertEqual(restored.thresholds, [0.4, 0.9])

    def test_unweighted(self):
        """Counts positions predicted 1 whose label is 0."""
        metric = metrics.FalsePositives()
        y_true = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        y_pred = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        metric.update_state(y_true, y_pred)
        self.assertAllClose(7.0, metric.result())

    def test_weighted(self):
        """Each false positive contributes its row's sample weight."""
        metric = metrics.FalsePositives()
        y_true = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        y_pred = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        weights = np.array([1.0, 1.5, 2.0, 2.5])
        result = metric(y_true, y_pred, sample_weight=weights)
        self.assertAllClose(14.0, result)

    def test_unweighted_with_thresholds(self):
        """One false-positive count is kept per threshold."""
        metric = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            [
                [0.9, 0.2, 0.8, 0.1],
                [0.2, 0.9, 0.7, 0.6],
                [0.1, 0.2, 0.4, 0.3],
                [0, 1, 0.7, 0.3],
            ]
        )
        y_true = np.array(
            [[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]
        )
        metric.update_state(y_true, y_pred)
        self.assertAllClose([7.0, 4.0, 2.0], metric.result())

    def test_weighted_with_thresholds(self):
        """Element-wise weights are applied at every threshold."""
        metric = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            [
                [0.9, 0.2, 0.8, 0.1],
                [0.2, 0.9, 0.7, 0.6],
                [0.1, 0.2, 0.4, 0.3],
                [0, 1, 0.7, 0.3],
            ]
        )
        y_true = np.array(
            [[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]
        )
        weights = [
            [1.0, 2.0, 3.0, 5.0],
            [7.0, 11.0, 13.0, 17.0],
            [19.0, 23.0, 29.0, 31.0],
            [5.0, 15.0, 10.0, 0],
        ]
        result = metric(y_true, y_pred, sample_weight=weights)
        self.assertAllClose([125.0, 42.0, 12.0], result)

    def test_threshold_limit(self):
        """Thresholds outside [0, 1] (or None) are rejected."""
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
        ):
            metrics.FalsePositives(thresholds=[-1, 0.5, 2])
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[None\]",
        ):
            metrics.FalsePositives(thresholds=[None])
class FalseNegativesTest(testing.TestCase):
    """Tests for the `FalseNegatives` confusion-matrix metric."""
    def test_config(self):
        """Name and thresholds survive a get_config/from_config round trip."""
        fn_obj = metrics.FalseNegatives(name="my_fn", thresholds=[0.4, 0.9])
        self.assertEqual(fn_obj.name, "my_fn")
        self.assertLen(fn_obj.variables, 1)
        self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
        # Check save and restore config
        fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
        self.assertEqual(fn_obj2.name, "my_fn")
        self.assertLen(fn_obj2.variables, 1)
        self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
    def test_unweighted(self):
        """Counts positions predicted 0 whose label is 1 (3 in fixtures)."""
        fn_obj = metrics.FalseNegatives()
        y_true = np.array(
            ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
        )
        y_pred = np.array(
            ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
        )
        fn_obj.update_state(y_true, y_pred)
        self.assertAllClose(3.0, fn_obj.result())
    def test_weighted(self):
        """Each false negative contributes its row's sample weight."""
        fn_obj = metrics.FalseNegatives()
        y_true = np.array(
            ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
        )
        y_pred = np.array(
            ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
        )
        sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
        result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(5.0, result)
    def test_unweighted_with_thresholds(self):
        """One false-negative count is kept per threshold."""
        fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            (
                (0.9, 0.2, 0.8, 0.1),
                (0.2, 0.9, 0.7, 0.6),
                (0.1, 0.2, 0.4, 0.3),
                (0, 1, 0.7, 0.3),
            )
        )
        y_true = np.array(
            ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        )
        fn_obj.update_state(y_true, y_pred)
        self.assertAllClose([1.0, 4.0, 6.0], fn_obj.result())
    def test_weighted_with_thresholds(self):
        """Single-column weights are combined per threshold."""
        fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            (
                (0.9, 0.2, 0.8, 0.1),
                (0.2, 0.9, 0.7, 0.6),
                (0.1, 0.2, 0.4, 0.3),
                (0, 1, 0.7, 0.3),
            )
        )
        y_true = np.array(
            ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        )
        sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
        result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose([4.0, 16.0, 23.0], result)
    def test_threshold_limit(self):
        """Thresholds outside [0, 1] (or None) are rejected."""
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
        ):
            metrics.FalseNegatives(thresholds=[-1, 0.5, 2])
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[None\]",
        ):
            metrics.FalseNegatives(thresholds=[None])
class TrueNegativesTest(testing.TestCase):
    """Tests for the `TrueNegatives` confusion-matrix metric."""
    def test_config(self):
        """Name and thresholds survive a get_config/from_config round trip."""
        tn_obj = metrics.TrueNegatives(name="my_tn", thresholds=[0.4, 0.9])
        self.assertEqual(tn_obj.name, "my_tn")
        self.assertLen(tn_obj.variables, 1)
        self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
        # Check save and restore config
        tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
        self.assertEqual(tn_obj2.name, "my_tn")
        self.assertLen(tn_obj2.variables, 1)
        self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
    def test_unweighted(self):
        """Counts positions predicted 0 whose label is 0 (3 in fixtures)."""
        tn_obj = metrics.TrueNegatives()
        y_true = np.array(
            ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
        )
        y_pred = np.array(
            ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
        )
        tn_obj.update_state(y_true, y_pred)
        self.assertAllClose(3.0, tn_obj.result())
    def test_weighted(self):
        """Each true negative contributes its row's sample weight."""
        tn_obj = metrics.TrueNegatives()
        y_true = np.array(
            ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
        )
        y_pred = np.array(
            ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
        )
        sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
        result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose(4.0, result)
    def test_unweighted_with_thresholds(self):
        """One true-negative count is kept per threshold."""
        tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            (
                (0.9, 0.2, 0.8, 0.1),
                (0.2, 0.9, 0.7, 0.6),
                (0.1, 0.2, 0.4, 0.3),
                (0, 1, 0.7, 0.3),
            )
        )
        y_true = np.array(
            ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        )
        tn_obj.update_state(y_true, y_pred)
        self.assertAllClose([2.0, 5.0, 7.0], tn_obj.result())
    def test_weighted_with_thresholds(self):
        """A single weight row is combined with every sample's counts."""
        tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
        y_pred = np.array(
            (
                (0.9, 0.2, 0.8, 0.1),
                (0.2, 0.9, 0.7, 0.6),
                (0.1, 0.2, 0.4, 0.3),
                (0, 1, 0.7, 0.3),
            )
        )
        y_true = np.array(
            ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        )
        sample_weight = ((0.0, 2.0, 3.0, 5.0),)
        result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
        self.assertAllClose([5.0, 15.0, 23.0], result)
    def test_threshold_limit(self):
        """Thresholds outside [0, 1] (or None) are rejected."""
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
        ):
            metrics.TrueNegatives(thresholds=[-1, 0.5, 2])
        with self.assertRaisesRegex(
            ValueError,
            r"Threshold values must be in \[0, 1\]. Received: \[None\]",
        ):
            metrics.TrueNegatives(thresholds=[None])
class TruePositiveTest(testing.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name="my_tp", thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, "my_tp")
self.assertLen(tp_obj.variables, 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, "my_tp")
self.assertLen(tp_obj2.variables, 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
tp_obj.update_state(y_true, y_pred)
self.assertAllClose(7.0, tp_obj.result())
def test_weighted(self):
tp_obj = metrics.TruePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12.0, result)
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
tp_obj.update_state(y_true, y_pred)
self.assertAllClose([6.0, 3.0, 1.0], tp_obj.result())
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = 37.0
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([222.0, 111.0, 37.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.TruePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.TruePositives(thresholds=[None])
class PrecisionTest(testing.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name="my_precision", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(p_obj.name, "my_precision")
self.assertLen(p_obj.variables, 2)
self.assertEqual(
[v.name for v in p_obj.variables],
["true_positives", "false_positives"],
)
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, "my_precision")
self.assertLen(p_obj2.variables, 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = np.array([1, 0, 1, 0])
y_true = np.array([0, 1, 1, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, result)
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs)
y_true = np.array(1 - inputs)
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, result)
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = np.array([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = np.array([[0, 1, 1, 0], [1, 0, 0, 1]])
result = p_obj(
y_true,
y_pred,
sample_weight=np.array([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, result)
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = np.array([0, 0, 0, 0])
y_true = np.array([0, 0, 0, 0])
result = p_obj(y_true, y_pred)
self.assertEqual(0, result)
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = np.array([1, 0, 0.6, 0])
y_true = np.array([0, 1, 1, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual([0.5, 0.0], result, 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[4, 0], [3, 1]], dtype="float32")
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual([expected_precision, 0], result, 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[4, 0], [3, 1]], dtype="float32")
for _ in range(2):
p_obj.update_state(y_true, y_pred, sample_weight=weights)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual([expected_precision, 0], p_obj.result(), 1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = np.array([0.2, 0.1, 0.5, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1.0 / 3, result)
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = np.array([[0.2, 0.1, 0.4, 0, 0.2]])
y_true1 = np.array([[0, 1, 1, 0, 1]])
p_obj(y_true1, y_pred1, sample_weight=np.array([[1, 4, 2, 3, 5]]))
y_pred2 = np.array([0.2, 0.6, 0.4, 0.2, 0.2])
y_true2 = np.array([1, 0, 1, 1, 1])
result = p_obj(y_true2, y_pred2, sample_weight=np.array(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, result)
def test_unweighted_class_id_should_throw_error_1d(self):
p_obj = metrics.Precision(class_id=2)
y_pred = np.array([0.2, 0.1, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
with self.assertRaisesRegex(
ValueError,
r"When class_id is provided, y_pred must be a 2D array "
r"with shape \(num_samples, num_classes\), found shape:.*",
):
p_obj(y_true, y_pred)
def test_unweighted_class_id_multiclass(self):
p_obj = metrics.Precision(class_id=1)
y_pred = np.array(
[
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.2],
[0.2, 0.6, 0.2],
[0.7, 0.2, 0.1],
[0.1, 0.1, 0.8],
]
)
y_true = np.array(
[
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
)
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1.0, result)
self.assertAlmostEqual(1.0, p_obj.true_positives)
self.assertAlmostEqual(0.0, p_obj.false_positives)
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=0.7, top_k=2)
y_pred = np.array([0.2, 0.8, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 1])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, result)
self.assertAlmostEqual(1, p_obj.true_positives)
self.assertAlmostEqual(0, p_obj.false_positives)
class RecallTest(testing.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name="my_recall", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(r_obj.name, "my_recall")
self.assertLen(r_obj.variables, 2)
self.assertEqual(
[v.name for v in r_obj.variables],
["true_positives", "false_negatives"],
)
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, "my_recall")
self.assertLen(r_obj2.variables, 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = np.array([1, 0, 1, 0])
y_true = np.array([0, 1, 1, 0])
self.assertAlmostEqual(0.5, r_obj(y_true, y_pred))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs)
y_true = np.array(1 - inputs)
self.assertAlmostEqual(0, r_obj(y_true, y_pred))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = np.array([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = np.array([[0, 1, 1, 0], [1, 0, 0, 1]])
result = r_obj(
y_true,
y_pred,
sample_weight=np.array([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, result)
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = np.array([0, 0, 0, 0])
y_true = np.array([0, 0, 0, 0])
self.assertEqual(0, r_obj(y_true, y_pred))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = np.array([1, 0, 0.6, 0])
y_true = np.array([0, 1, 1, 0])
self.assertAllClose([0.5, 0.0], r_obj(y_true, y_pred), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[1, 4], [3, 2]], dtype="float32")
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_recall = weighted_tp / weighted_positives
self.assertAllClose([expected_recall, 0], result, 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[1, 4], [3, 2]], dtype="float32")
for _ in range(2):
r_obj.update_state(y_true, y_pred, sample_weight=weights)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_recall = weighted_tp / weighted_positives
self.assertAllClose([expected_recall, 0], r_obj.result(), 1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = np.array([0.2, 0.1, 0.5, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
self.assertAlmostEqual(0.5, r_obj(y_true, y_pred))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = np.array([[0.2, 0.1, 0.4, 0, 0.2]])
y_true1 = np.array([[0, 1, 1, 0, 1]])
r_obj(y_true1, y_pred1, sample_weight=np.array([[1, 4, 2, 3, 5]]))
y_pred2 = np.array([0.2, 0.6, 0.4, 0.2, 0.2])
y_true2 = np.array([1, 0, 1, 1, 1])
result = r_obj(y_true2, y_pred2, sample_weight=np.array(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, result)
def test_unweighted_class_id_should_throw_error_1d(self):
r_obj = metrics.Recall(class_id=2)
y_pred = np.array([0.2, 0.1, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
with self.assertRaisesRegex(
ValueError,
r"When class_id is provided, y_pred must be a 2D array "
r"with shape \(num_samples, num_classes\), found shape:.*",
):
r_obj(y_true, y_pred)
def test_unweighted_class_id_multiclass(self):
r_obj = metrics.Recall(class_id=1)
y_pred = np.array(
[
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.2],
[0.2, 0.6, 0.2],
[0.7, 0.2, 0.1],
[0.1, 0.1, 0.8],
]
)
y_true = np.array(
[
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
)
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1.0, result)
self.assertAlmostEqual(1.0, r_obj.true_positives)
self.assertAlmostEqual(0.0, r_obj.false_negatives)
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=0.7, top_k=2)
y_pred = np.array([0.2, 0.8, 0.6, 0, 0.2])
y_true = np.array([1, 1, 1, 0, 1])
self.assertAlmostEqual(0.25, r_obj(y_true, y_pred))
self.assertAlmostEqual(1, r_obj.true_positives)
self.assertAlmostEqual(3, r_obj.false_negatives)
class SensitivityAtSpecificityTest(testing.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4,
num_thresholds=100,
class_id=12,
name="sensitivity_at_specificity_1",
)
self.assertEqual(s_obj.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.8, s_obj(y_true, y_pred))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.SpecificityAtSensitivity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, result)
def test_invalid_specificity(self):
with self.assertRaisesRegex(
ValueError, r"`specificity` must be in the range \[0, 1\]."
):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@pytest.mark.requires_trainable_backend
def test_handles_sas_metrics(self):
# Test for https://github.com/keras-team/keras/issues/19376
model = models.Sequential(
[
layers.Input((1,)),
layers.Dense(1),
]
)
sas = metrics.SpecificityAtSensitivity(0.5, name="sas")
model.compile(optimizer="adam", loss="crossentropy", metrics=[sas])
model.fit(np.ones((5, 1)), np.ones((5, 1)))
class SpecificityAtSensitivityTest(testing.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4,
num_thresholds=100,
class_id=12,
name="specificity_at_sensitivity_1",
)
self.assertEqual(s_obj.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(1.0)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.2, s_obj(y_true, y_pred))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.SpecificityAtSensitivity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, result)
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`sensitivity` must be in the range \[0, 1\]."
):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
class PrecisionAtRecallTest(testing.TestCase):
def test_config(self):
s_obj = metrics.PrecisionAtRecall(
0.4, num_thresholds=100, class_id=12, name="precision_at_recall_1"
)
self.assertEqual(s_obj.name, "precision_at_recall_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.recall, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.PrecisionAtRecall.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "precision_at_recall_1")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/reduction_metrics.py | keras/src/metrics/reduction_metrics.py | from keras.src import backend
from keras.src import initializers
from keras.src import losses
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics.metric import Metric
from keras.src.saving import serialization_lib
def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype):
dtype = dtype or backend.floatx()
mask = backend.get_keras_mask(values)
values = ops.cast(values, dtype=dtype)
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
if mask is not None:
sample_weight = losses.loss.apply_mask(
sample_weight, mask, dtype=dtype, reduction="sum"
)
# Update dimensions of weights to match with values if possible.
values, sample_weight = losses.loss.squeeze_or_expand_to_same_rank(
values, sample_weight
)
# Reduce values to same ndim as weight array.
weight_ndim = len(sample_weight.shape)
values_ndim = len(values.shape)
if values_ndim > weight_ndim:
values = reduce_fn(
values, axis=list(range(weight_ndim, values_ndim))
)
# Broadcast sample_weight. It doesn't change the multiplication below
# but changes the sample_weight reduction applied later.
sample_weight = ops.broadcast_to(sample_weight, ops.shape(values))
values = values * sample_weight
if weight_ndim > 1:
sample_weight = reduce_fn(
sample_weight, axis=list(range(1, weight_ndim))
)
values_ndim = len(values.shape)
if values_ndim > 1:
values = reduce_fn(values, axis=list(range(1, values_ndim)))
return values, sample_weight
@keras_export("keras.metrics.Sum")
class Sum(Metric):
"""Compute the (weighted) sum of the given values.
For example, if `values` is `[1, 3, 5, 7]` then their sum is 16.
If `sample_weight` was specified as `[1, 1, 0, 0]` then the sum would be 4.
This metric creates one variable, `total`.
This is ultimately returned as the sum value.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = metrics.Sum()
>>> m.update_state([1, 3, 5, 7])
>>> m.result()
16.0
>>> m = metrics.Sum()
>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result()
4.0
"""
def __init__(self, name="sum", dtype=None):
super().__init__(name=name, dtype=dtype)
self.total = self.add_variable(
shape=(),
initializer=initializers.Zeros(),
dtype=self.dtype,
name="total",
)
def update_state(self, values, sample_weight=None):
values, _ = reduce_to_samplewise_values(
values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype
)
self.total.assign_add(ops.sum(values))
def reset_state(self):
self.total.assign(0)
def result(self):
return ops.cast(self.total, self.dtype)
@keras_export("keras.metrics.Mean")
class Mean(Metric):
"""Compute the (weighted) mean of the given values.
For example, if values is `[1, 3, 5, 7]` then the mean is 4.
If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2.
This metric creates two variables, `total` and `count`.
The mean value returned is simply `total` divided by `count`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = Mean()
>>> m.update_state([1, 3, 5, 7])
>>> m.result()
4.0
>>> m.reset_state()
>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result()
2.0
"""
def __init__(self, name="mean", dtype=None):
super().__init__(name=name, dtype=dtype)
self.total = self.add_variable(
shape=(),
initializer=initializers.Zeros(),
dtype=self.dtype,
name="total",
)
self.count = self.add_variable(
shape=(),
initializer=initializers.Zeros(),
dtype=self.dtype,
name="count",
)
def update_state(self, values, sample_weight=None):
values, sample_weight = reduce_to_samplewise_values(
values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype
)
self.total.assign_add(ops.sum(values))
if sample_weight is not None:
num_samples = ops.sum(sample_weight)
elif len(values.shape) >= 1:
num_samples = ops.shape(values)[0]
else:
num_samples = 1
self.count.assign_add(ops.cast(num_samples, dtype=self.dtype))
def reset_state(self):
self.total.assign(0)
self.count.assign(0)
def result(self):
return ops.divide_no_nan(
self.total, ops.cast(self.count, dtype=self.dtype)
)
@keras_export("keras.metrics.MeanMetricWrapper")
class MeanMetricWrapper(Mean):
"""Wrap a stateless metric function with the `Mean` metric.
You could use this class to quickly build a mean metric from a function. The
function needs to have the signature `fn(y_true, y_pred)` and return a
per-sample loss array. `MeanMetricWrapper.result()` will return
the average metric value across all samples seen so far.
For example:
```python
def mse(y_true, y_pred):
return (y_true - y_pred) ** 2
mse_metric = MeanMetricWrapper(fn=mse)
```
Args:
fn: The metric function to wrap, with signature
`fn(y_true, y_pred, **kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Keyword arguments to pass on to `fn`.
"""
def __init__(self, fn, name=None, dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
# If we are wrapping a Keras loss, register the metric's
# direction as "down" (needs to be minimized during training).
if (
self._fn in losses.ALL_OBJECTS
or hasattr(self._fn, "__class__")
and self._fn.__class__ in losses.ALL_OBJECTS
):
self._direction = "down"
def update_state(self, y_true, y_pred, sample_weight=None):
mask = backend.get_keras_mask(y_pred)
values = self._fn(y_true, y_pred, **self._fn_kwargs)
sample_weight = losses.loss.apply_mask(
sample_weight, mask, dtype=self.dtype, reduction="sum"
)
return super().update_state(values, sample_weight=sample_weight)
def get_config(self):
base_config = super().get_config()
config = {"fn": serialization_lib.serialize_keras_object(self._fn)}
config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
return {**base_config, **config}
@classmethod
def from_config(cls, config):
if "fn" in config:
config = serialization_lib.deserialize_keras_object(config)
return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/accuracy_metrics_test.py | keras/src/metrics/accuracy_metrics_test.py | import numpy as np
from keras.src import testing
from keras.src.metrics import accuracy_metrics
class AccuracyTest(testing.TestCase):
def test_config(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
self.assertEqual(acc_obj.name, "accuracy")
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj._dtype, "float32")
# Test get_config
acc_obj_config = acc_obj.get_config()
self.assertEqual(acc_obj_config["name"], "accuracy")
self.assertEqual(acc_obj_config["dtype"], "float32")
# Check save and restore config
acc_obj2 = accuracy_metrics.Accuracy.from_config(acc_obj_config)
self.assertEqual(acc_obj2.name, "accuracy")
self.assertEqual(len(acc_obj2.variables), 2)
self.assertEqual(acc_obj2._dtype, "float32")
def test_unweighted(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1], [2], [3], [4]])
y_pred = np.array([[0], [2], [3], [4]])
acc_obj.update_state(y_true, y_pred)
result = acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
def test_weighted(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1], [2], [3], [4]])
y_pred = np.array([[0], [2], [3], [4]])
sample_weight = np.array([1, 1, 0, 0])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_rank_1(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([1, 2, 3, 4])
y_pred = np.array([0, 2, 3, 4])
sample_weight = np.array([1, 1, 0, 0])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_weights(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1, 2], [3, 4]])
y_pred = np.array([[0, 2], [3, 4]])
sample_weight = np.array([[1, 0], [0, 1]])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_broadcast_weights(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1, 2], [3, 4]])
y_pred = np.array([[0, 2], [3, 4]])
sample_weight = np.array([[1, 0]])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
class BinaryAccuracyTest(testing.TestCase):
def test_config(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
self.assertEqual(bin_acc_obj.name, "binary_accuracy")
self.assertEqual(len(bin_acc_obj.variables), 2)
self.assertEqual(bin_acc_obj._dtype, "float32")
# Test get_config
bin_acc_obj_config = bin_acc_obj.get_config()
self.assertEqual(bin_acc_obj_config["name"], "binary_accuracy")
self.assertEqual(bin_acc_obj_config["dtype"], "float32")
# Check save and restore config
bin_acc_obj2 = accuracy_metrics.BinaryAccuracy.from_config(
bin_acc_obj_config
)
self.assertEqual(bin_acc_obj2.name, "binary_accuracy")
self.assertEqual(len(bin_acc_obj2.variables), 2)
self.assertEqual(bin_acc_obj2._dtype, "float32")
def test_unweighted(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1], [1], [0], [0]])
y_pred = np.array([[0.98], [1], [0], [0.6]])
bin_acc_obj.update_state(y_true, y_pred)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
# Test broadcasting case
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([1, 1, 0, 0])
y_pred = np.array([[0.98], [1], [0], [0.6]])
bin_acc_obj.update_state(y_true, y_pred)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
def test_weighted(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1], [1], [0], [0]])
y_pred = np.array([[0.98], [1], [0], [0.6]])
sample_weight = np.array([1, 0, 0, 1])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_rank_1(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([1, 1, 0, 0])
y_pred = np.array([0.98, 1, 0, 0.6])
sample_weight = np.array([1, 0, 0, 1])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_weights(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1, 1], [0, 0]])
y_pred = np.array([[0.98, 1], [0, 0.6]])
sample_weight = np.array([[1, 0], [0, 1]])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_broadcast_weights(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1, 1], [0, 0]])
y_pred = np.array([[0.98, 1], [0, 0.6]])
sample_weight = np.array([[1, 0]])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 1.0, atol=1e-3)
def test_threshold(self):
    # Two instances over identical data, differing only in the decision
    # threshold; the 0.5 prediction flips from a hit to a miss.
    low_threshold_metric = accuracy_metrics.BinaryAccuracy(
        name="binary_accuracy", dtype="float32", threshold=0.3
    )
    high_threshold_metric = accuracy_metrics.BinaryAccuracy(
        name="binary_accuracy", dtype="float32", threshold=0.9
    )
    labels = np.array([[1], [1], [0], [0]])
    preds = np.array([[0.98], [0.5], [0.1], [0.2]])
    low_threshold_metric.update_state(labels, preds)
    high_threshold_metric.update_state(labels, preds)
    # Higher threshold must result in lower measured accuracy.
    self.assertAllClose(low_threshold_metric.result(), 1.0)
    self.assertAllClose(high_threshold_metric.result(), 0.75)
class CategoricalAccuracyTest(testing.TestCase):
    """Tests for `CategoricalAccuracy` (one-hot labels)."""

    def test_config(self):
        metric = accuracy_metrics.CategoricalAccuracy(
            name="categorical_accuracy", dtype="float32"
        )
        self.assertEqual(metric.name, "categorical_accuracy")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Test get_config
        config = metric.get_config()
        self.assertEqual(config["name"], "categorical_accuracy")
        self.assertEqual(config["dtype"], "float32")

        # Check save and restore config
        restored = accuracy_metrics.CategoricalAccuracy.from_config(config)
        self.assertEqual(restored.name, "categorical_accuracy")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")

    def test_unweighted(self):
        metric = accuracy_metrics.CategoricalAccuracy(
            name="categorical_accuracy", dtype="float32"
        )
        labels = np.array([[0, 0, 1], [0, 1, 0]])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 0.5, atol=1e-3)

    def test_weighted(self):
        metric = accuracy_metrics.CategoricalAccuracy(
            name="categorical_accuracy", dtype="float32"
        )
        labels = np.array([[0, 0, 1], [0, 1, 0]])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
        weights = np.array([0.7, 0.3])
        metric.update_state(labels, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.3, atol=1e-3)
class SparseCategoricalAccuracyTest(testing.TestCase):
    """Tests for `SparseCategoricalAccuracy` (integer labels)."""

    def test_config(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        self.assertEqual(metric.name, "sparse_categorical_accuracy")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Test get_config
        config = metric.get_config()
        self.assertEqual(config["name"], "sparse_categorical_accuracy")
        self.assertEqual(config["dtype"], "float32")

        # Check save and restore config
        restored = accuracy_metrics.SparseCategoricalAccuracy.from_config(
            config
        )
        self.assertEqual(restored.name, "sparse_categorical_accuracy")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")

    def test_unweighted(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        labels = np.array([[2], [1]])
        preds = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 0.5, atol=1e-3)

    def test_weighted(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        labels = np.array([[2], [1]])
        preds = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
        weights = np.array([0.7, 0.3])
        metric.update_state(labels, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.3, atol=1e-3)

    def test_squeeze_y_true(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # Scenario with 100% accuracy for simplicity.
        # y_true is a 2D tensor with shape (3, 1) to test squeeze.
        labels = np.array([[0], [1], [2]])
        preds = np.array(
            [[0.9, 0.05, 0.05], [0.05, 0.9, 0.05], [0.05, 0.05, 0.9]]
        )
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)

    def test_cast_y_pred_dtype(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # Scenario with 100% accuracy for simplicity.
        # y_true is a 1D tensor with shape (2,) to test cast.
        labels = np.array([0, 1], dtype=np.int64)
        preds = np.array([[0.9, 0.1], [0.1, 0.9]], dtype=np.float32)
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)

    def test_reshape_matches(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # Scenario with 100% accuracy for simplicity.
        # y_true is a 2D tensor with shape (2, 1) to test reshape.
        labels = np.array([[0], [0]], dtype=np.int64)
        preds = np.array(
            [[[0.9, 0.1, 0.0], [0.8, 0.15, 0.05]]], dtype=np.float32
        )
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), np.array([1.0, 1.0]))

    def test_squeeze_y_true_shape(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # True labels are in the shape (num_samples, 1) should be squeezed.
        labels = np.array([[0], [1], [2]])
        preds = np.array(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        )
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)

    def test_cast_y_pred_to_match_y_true_dtype(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # True labels are integers, while predictions are floats.
        labels = np.array([0, 1, 2], dtype=np.int32)
        preds = np.array(
            [[0.9, 0.1, 0.0], [0.0, 0.9, 0.1], [0.1, 0.0, 0.9]],
            dtype=np.float64,
        )
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)

    def test_reshape_matches_to_original_y_true_shape(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        # True labels have an additional dimension that needs to be squeezed.
        labels = np.array([[0], [1]])
        # Predictions must trigger a reshape of matches.
        preds = np.array([[0.9, 0.1], [0.1, 0.9]])
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)

    def test_matching_shapes_without_squeeze(self):
        metric = accuracy_metrics.SparseCategoricalAccuracy(
            name="sparse_categorical_accuracy", dtype="float32"
        )
        labels = np.array([2, 1, 0], dtype=np.int32)
        preds = np.array(
            [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
            dtype=np.float32,
        )
        # No need to squeeze or reshape.
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 1.0, atol=1e-4)
class TopKCategoricalAccuracyTest(testing.TestCase):
    """Tests for `TopKCategoricalAccuracy` (one-hot labels, top-k match)."""

    def test_config(self):
        metric = accuracy_metrics.TopKCategoricalAccuracy(
            k=1, name="top_k_categorical_accuracy", dtype="float32"
        )
        self.assertEqual(metric.name, "top_k_categorical_accuracy")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Test get_config
        config = metric.get_config()
        self.assertEqual(config["name"], "top_k_categorical_accuracy")
        self.assertEqual(config["dtype"], "float32")
        self.assertEqual(config["k"], 1)

        # Check save and restore config
        restored = accuracy_metrics.TopKCategoricalAccuracy.from_config(
            config
        )
        self.assertEqual(restored.name, "top_k_categorical_accuracy")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")
        self.assertEqual(restored.k, 1)

    def test_unweighted(self):
        metric = accuracy_metrics.TopKCategoricalAccuracy(
            k=1, name="top_k_categorical_accuracy", dtype="float32"
        )
        labels = np.array([[0, 0, 1], [0, 1, 0]])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32")
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 0.5, atol=1e-3)

    def test_weighted(self):
        metric = accuracy_metrics.TopKCategoricalAccuracy(
            k=1, name="top_k_categorical_accuracy", dtype="float32"
        )
        labels = np.array([[0, 0, 1], [0, 1, 0]])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32")
        weights = np.array([0.7, 0.3])
        metric.update_state(labels, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.3, atol=1e-3)
class SparseTopKCategoricalAccuracyTest(testing.TestCase):
    """Tests for `SparseTopKCategoricalAccuracy` (integer labels, top-k)."""

    def test_config(self):
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1, name="sparse_top_k_categorical_accuracy", dtype="float32"
        )
        self.assertEqual(metric.name, "sparse_top_k_categorical_accuracy")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Test get_config
        config = metric.get_config()
        self.assertEqual(
            config["name"], "sparse_top_k_categorical_accuracy"
        )
        self.assertEqual(config["dtype"], "float32")
        self.assertEqual(config["k"], 1)

        # Check save and restore config
        restored = (
            accuracy_metrics.SparseTopKCategoricalAccuracy.from_config(
                config
            )
        )
        self.assertEqual(restored.name, "sparse_top_k_categorical_accuracy")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")
        self.assertEqual(restored.k, 1)
        self.assertFalse(restored.from_sorted_ids)

    def test_config_from_sorted_ids(self):
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1,
            name="sparse_top_k_categorical_accuracy",
            dtype="float32",
            from_sorted_ids=True,
        )

        # Test get_config
        config = metric.get_config()
        self.assertTrue(config["from_sorted_ids"])

        # Check save and restore config
        restored = (
            accuracy_metrics.SparseTopKCategoricalAccuracy.from_config(
                config
            )
        )
        self.assertTrue(restored.from_sorted_ids)

    def test_unweighted(self):
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1, name="sparse_top_k_categorical_accuracy", dtype="float32"
        )
        labels = np.array([2, 1])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32")
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 0.5, atol=1e-3)

    def test_weighted(self):
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1, name="sparse_top_k_categorical_accuracy", dtype="float32"
        )
        labels = np.array([2, 1])
        preds = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32")
        weights = np.array([0.7, 0.3])
        metric.update_state(labels, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.3, atol=1e-3)

    def test_from_sorted_ids_unweighted(self):
        # Predictions are class ids sorted by confidence, not scores.
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1,
            name="sparse_top_k_categorical_accuracy",
            dtype="float32",
            from_sorted_ids=True,
        )
        labels = np.array([2, 1])
        preds = np.array([[1, 0, 3], [1, 2, 3]])
        metric.update_state(labels, preds)
        self.assertAllClose(metric.result(), 0.5, atol=1e-3)

    def test_from_sorted_ids_weighted(self):
        metric = accuracy_metrics.SparseTopKCategoricalAccuracy(
            k=1,
            name="sparse_top_k_categorical_accuracy",
            dtype="float32",
            from_sorted_ids=True,
        )
        labels = np.array([2, 1])
        preds = np.array([[1, 0, 3], [1, 2, 3]])
        weights = np.array([0.7, 0.3])
        metric.update_state(labels, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.3, atol=1e-3)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/reduction_metrics_test.py | keras/src/metrics/reduction_metrics_test.py | import numpy as np
from keras.src import backend
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.metrics import reduction_metrics
from keras.src.saving import register_keras_serializable
class SumTest(testing.TestCase):
    """Tests for the `Sum` reduction metric."""

    def test_config(self):
        metric = reduction_metrics.Sum(name="sum", dtype="float32")
        self.assertEqual(metric.name, "sum")
        self.assertEqual(len(metric.variables), 1)
        self.assertEqual(metric._dtype, "float32")

        # Check save and restore config
        restored = reduction_metrics.Sum.from_config(metric.get_config())
        self.assertEqual(restored.name, "sum")
        self.assertEqual(len(restored.variables), 1)
        self.assertEqual(restored._dtype, "float32")

    def test_unweighted(self):
        metric = reduction_metrics.Sum(name="sum", dtype="float32")
        metric.update_state([1, 3, 5, 7])
        self.assertAllClose(metric.result(), 16.0, atol=1e-3)

    def test_weighted(self):
        metric = reduction_metrics.Sum(name="sum", dtype="float32")
        metric.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
        self.assertAllClose(metric.result(), 4.0, atol=1e-3)

    def test_weighted_nd(self):
        metric = reduction_metrics.Sum(name="sum", dtype="float32")
        metric.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
        self.assertAllClose(metric.result(), 9.0, atol=1e-3)

    def test_weighted_nd_broadcast(self):
        # The (1, 2) weights broadcast across both rows, keeping 1 and 5.
        metric = reduction_metrics.Sum(name="sum", dtype="float32")
        metric.update_state([[1, 3], [5, 7]], sample_weight=[[1, 0]])
        self.assertAllClose(metric.result(), 6.0, atol=1e-3)
class MeanTest(testing.TestCase):
    """Tests for the `Mean` reduction metric."""

    def test_config(self):
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        self.assertEqual(metric.name, "mean")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Check save and restore config
        restored = reduction_metrics.Mean.from_config(metric.get_config())
        self.assertEqual(restored.name, "mean")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")

    def test_unweighted(self):
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        metric.update_state([1, 3, 5, 7])
        self.assertAllClose(metric.result(), 4.0, atol=1e-3)

    def test_weighted(self):
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        metric.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
        self.assertAllClose(metric.result(), 2.0, atol=1e-3)

    def test_weighted_negative_weights(self):
        # Negative weights cancel in numerator and denominator, so the
        # result matches the positively-weighted case.
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        metric.update_state([1, 3, 5, 7], sample_weight=[-1, -1, 0, 0])
        self.assertAllClose(metric.result(), 2.0, atol=1e-3)

    def test_weighted_nd(self):
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        metric.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
        self.assertAllClose(metric.result(), 3.0, atol=1e-3)

    def test_weighted_nd_broadcast(self):
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        metric.update_state([[1, 3], [5, 7]], sample_weight=[[1, 0]])
        self.assertAllClose(metric.result(), 3.0, atol=1e-3)

    def test_weighted_dynamic_shapes(self):
        # Symbolic (None-batch) inputs must still produce a scalar spec.
        metric = reduction_metrics.Mean(name="mean", dtype="float32")
        spec = backend.compute_output_spec(
            metric, KerasTensor((None, 2)), KerasTensor((None, 2))
        )
        self.assertAllEqual(spec.shape, ())
# How users would register a custom function or class to use with
# MeanMetricWrapper.
@register_keras_serializable(package="test", name="mse")
def mse(y_true, y_pred):
    """Element-wise squared error between targets and predictions."""
    diff = y_true - y_pred
    return diff * diff
class MetricWrapperTest(testing.TestCase):
    """Tests for `MeanMetricWrapper` wrapping a registered function."""

    def test_config(self):
        metric = reduction_metrics.MeanMetricWrapper(
            fn=mse, name="mse", dtype="float32"
        )
        self.assertEqual(metric.name, "mse")
        self.assertEqual(len(metric.variables), 2)
        self.assertEqual(metric._dtype, "float32")

        # Check save and restore config
        restored = reduction_metrics.MeanMetricWrapper.from_config(
            metric.get_config()
        )
        self.assertEqual(restored.name, "mse")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "float32")
        self.assertTrue("fn" in restored.get_config())

    def test_unweighted(self):
        metric = reduction_metrics.MeanMetricWrapper(
            fn=mse, name="mse", dtype="float32"
        )
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        metric.update_state(labels, preds)
        self.assertAllClose(0.5, metric.result(), atol=1e-5)

    def test_weighted(self):
        metric = reduction_metrics.MeanMetricWrapper(
            fn=mse, name="mse", dtype="float32"
        )
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        weights = np.array([1.0, 1.5, 2.0, 2.5])
        result = metric(labels, preds, sample_weight=weights)
        self.assertAllClose(0.54285, result, atol=1e-5)

    def test_weighted_broadcast(self):
        # Per-feature weights with shape (1, 5) broadcast over all rows.
        metric = reduction_metrics.MeanMetricWrapper(
            fn=mse, name="mse", dtype="float32"
        )
        labels = np.array(
            [[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
        )
        preds = np.array(
            [[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
        )
        weights = np.array([[1.0, 0.0, 0.5, 0.0, 1.0]])
        result = metric(labels, preds, sample_weight=weights)
        self.assertAllClose(0.45, result, atol=1e-5)

    def test_weighted_dynamic_shape(self):
        # Symbolic (None-batch) inputs must still produce a scalar spec.
        metric = reduction_metrics.MeanMetricWrapper(
            fn=mse, name="mse", dtype="float32"
        )
        spec = backend.compute_output_spec(
            metric,
            KerasTensor((None, 5)),
            KerasTensor((None, 5)),
            KerasTensor((None, 5)),
        )
        self.assertAllEqual(spec.shape, ())

    def test_binary_accuracy_with_boolean_inputs(self):
        # The model outputs booleans; BinaryAccuracy must accept them.
        inputs = layers.Input(shape=(1,))
        outputs = inputs > 0.5
        model = models.Model(inputs=inputs, outputs=outputs)

        x = np.random.rand(32, 1)
        y = x > 0.5

        preds = model.predict(x)
        metric = metrics.BinaryAccuracy()
        metric.update_state(y, preds)
        result = metric.result()
        assert result == 1.0
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/confusion_metrics.py | keras/src/metrics/confusion_metrics.py | import numpy as np
from keras.src import activations
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics import metrics_utils
from keras.src.metrics.metric import Metric
from keras.src.utils.python_utils import to_list
class _ConfusionMatrixConditionCount(Metric):
    """Calculates the number of the given confusion matrix condition.

    Args:
        confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`
            conditions.
        thresholds: (Optional) Defaults to `0.5`. A float value or a python
            list / tuple of float threshold values in `[0, 1]`. A threshold
            is compared with prediction values to determine the truth value
            of predictions (i.e., above the threshold is `True`, below is
            `False`). One metric value is generated for each threshold
            value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    """

    def __init__(
        self, confusion_matrix_cond, thresholds=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        self._confusion_matrix_cond = confusion_matrix_cond
        # Keep the raw argument for `get_config` round-trips.
        self.init_thresholds = thresholds
        parsed_thresholds = metrics_utils.parse_init_thresholds(
            thresholds, default_threshold=0.5
        )
        self.thresholds = parsed_thresholds
        self._thresholds_distributed_evenly = (
            metrics_utils.is_evenly_distributed_thresholds(parsed_thresholds)
        )
        # One running count per threshold.
        self.accumulator = self.add_variable(
            shape=(len(parsed_thresholds),),
            initializer=initializers.Zeros(),
            name="accumulator",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the metric statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to
                `1`. Can be a tensor whose rank is either 0, or the same
                rank as `y_true`, and must be broadcastable to `y_true`.
        """
        return metrics_utils.update_confusion_matrix_variables(
            {self._confusion_matrix_cond: self.accumulator},
            y_true,
            y_pred,
            thresholds=self.thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            sample_weight=sample_weight,
        )

    def result(self):
        # A single threshold yields a scalar; multiple thresholds yield the
        # full accumulator vector.
        value = (
            self.accumulator[0]
            if len(self.thresholds) == 1
            else self.accumulator
        )
        return backend.convert_to_tensor(value)

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "thresholds": self.init_thresholds}
@keras_export("keras.metrics.FalsePositives")
class FalsePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.FalsePositives()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.FalseNegatives")
class FalseNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of false negatives.
If `sample_weight` is given, calculates the sum of the weights of
false negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.FalseNegatives()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.TrueNegatives")
class TrueNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.TrueNegatives()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.TruePositives")
class TruePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of true positives.
If `sample_weight` is given, calculates the sum of the weights of
true positives. This metric creates one local variable, `true_positives`
that is used to keep track of the number of true positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.TruePositives()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.Precision")
class Precision(Metric):
"""Computes the precision of the predictions with respect to the labels.
The metric creates two local variables, `true_positives` and
`false_positives` that are used to compute the precision. This value is
ultimately returned as `precision`, an idempotent operation that simply
divides `true_positives` by the sum of `true_positives` and
`false_positives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, we'll calculate precision as how often on average a class
among the top-k classes with the highest predicted values of a batch entry
is correct and can be found in the label for that entry.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold and/or in
the top-k highest predictions, and computing the fraction of them for which
`class_id` is indeed a correct label.
Args:
thresholds: (Optional) A float value, or a Python list/tuple of float
threshold values in `[0, 1]`. A threshold is compared with
prediction values to determine the truth value of predictions (i.e.,
above the threshold is `True`, below is `False`). If used with a
loss function that sets `from_logits=True` (i.e. no sigmoid applied
to predictions), `thresholds` should be set to 0. One metric value
is generated for each threshold value. If neither `thresholds` nor
`top_k` are set, the default is to calculate precision with
`thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.Precision()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
0.6666667
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
>>> # With top_k=2, it will calculate precision over y_true[:2]
>>> # and y_pred[:2]
>>> m = keras.metrics.Precision(top_k=2)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result()
0.0
>>> # With top_k=4, it will calculate precision over y_true[:4]
>>> # and y_pred[:4]
>>> m = keras.metrics.Precision(top_k=4)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.Precision()])
```
Usage with a loss with `from_logits=True`:
```python
model.compile(optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.Precision(thresholds=0)])
```
"""
def __init__(
self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold
)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
)
self.true_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_positives",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as
`y_pred`. Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range
`[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight,
)
def result(self):
result = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_positives),
)
return result[0] if len(self.thresholds) == 1 else result
def reset_state(self):
num_thresholds = len(to_list(self.thresholds))
self.true_positives.assign(ops.zeros((num_thresholds,)))
self.false_positives.assign(ops.zeros((num_thresholds,)))
def get_config(self):
config = {
"thresholds": self.init_thresholds,
"top_k": self.top_k,
"class_id": self.class_id,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras.metrics.Recall")
class Recall(Metric):
    """Computes the recall of the predictions with respect to the labels.

    Two accumulators, `true_positives` and `false_negatives`, are updated
    batch by batch; the reported value is the idempotent ratio
    `true_positives / (true_positives + false_negatives)`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `top_k` is set, recall will be computed as how often on average a
    class among the labels of a batch entry is in the top-k predictions.

    If `class_id` is specified, we calculate recall by considering only the
    entries in the batch for which `class_id` is in the label, and computing
    the fraction of them for which `class_id` is above the threshold and/or
    in the top-k predictions.

    Args:
        thresholds: (Optional) A float value, or a Python list/tuple of
            float threshold values in `[0, 1]`. A threshold is compared with
            prediction values to determine the truth value of predictions
            (i.e., above the threshold is `True`, below is `False`). If used
            with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set
            to 0. One metric value is generated for each threshold value.
            If neither `thresholds` nor `top_k` are set, the default is to
            calculate recall with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the
            top-k predictions to consider when calculating recall.
        class_id: (Optional) Integer class ID for which we want binary
            metrics. This must be in the half-open interval
            `[0, num_classes)`, where `num_classes` is the last dimension of
            predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.Recall()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    0.6666667

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Recall()])
    ```

    Usage with a loss with `from_logits=True`:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.Recall(thresholds=0)])
    ```
    """

    def __init__(
        self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        # Higher recall is better, so optimization should maximize it.
        self._direction = "up"
        self.init_thresholds = thresholds
        self.top_k = top_k
        self.class_id = class_id
        # With `top_k` set, membership in the top-k decides positives, so
        # the effective threshold is -inf; otherwise use the 0.5 cutoff.
        if top_k is None:
            fallback_threshold = 0.5
        else:
            fallback_threshold = metrics_utils.NEG_INF
        self.thresholds = metrics_utils.parse_init_thresholds(
            thresholds, default_threshold=fallback_threshold
        )
        self._thresholds_distributed_evenly = (
            metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
        )
        # One confusion-matrix slot per threshold value.
        num_thresholds = len(self.thresholds)
        self.true_positives = self.add_variable(
            shape=(num_thresholds,),
            initializer=initializers.Zeros(),
            name="true_positives",
        )
        self.false_negatives = self.add_variable(
            shape=(num_thresholds,),
            initializer=initializers.Zeros(),
            name="false_negatives",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates true positive and false negative statistics.

        Args:
            y_true: The ground truth values, with the same dimensions as
                `y_pred`. Will be cast to `bool`.
            y_pred: The predicted values. Each element must be in the range
                `[0, 1]`.
            sample_weight: Optional weighting of each example. Defaults to
                `1`. Can be a tensor whose rank is either 0, or the same
                rank as `y_true`, and must be broadcastable to `y_true`.
        """
        # Recall only needs the TP and FN rows of the confusion matrix.
        variables_to_update = {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,  # noqa: E501
        }
        metrics_utils.update_confusion_matrix_variables(
            variables_to_update,
            y_true,
            y_pred,
            thresholds=self.thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            top_k=self.top_k,
            class_id=self.class_id,
            sample_weight=sample_weight,
        )

    def result(self):
        """Return recall = TP / (TP + FN), one value per threshold."""
        actual_positives = ops.add(self.true_positives, self.false_negatives)
        ratios = ops.divide_no_nan(self.true_positives, actual_positives)
        if len(self.thresholds) == 1:
            # A single threshold yields a scalar rather than a 1-vector.
            return ratios[0]
        return ratios

    def reset_state(self):
        """Zero out the accumulated confusion-matrix counts."""
        zeros = ops.zeros((len(to_list(self.thresholds)),))
        self.true_positives.assign(zeros)
        self.false_negatives.assign(zeros)

    def get_config(self):
        """Return constructor arguments needed to re-create this metric."""
        base_config = super().get_config()
        own_config = {
            "thresholds": self.init_thresholds,
            "top_k": self.top_k,
            "class_id": self.class_id,
        }
        return {**base_config, **own_config}
class SensitivitySpecificityBase(Metric):
    """Abstract base class for computing sensitivity and specificity.

    Subclasses provide a target `value` for one of the two statistics and a
    `result()` that reads the best value of the other statistic over the
    threshold grid built here.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
    """

    def __init__(
        self, value, num_thresholds=200, class_id=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        # These metrics are maximized during optimization.
        self._direction = "up"
        if num_thresholds <= 0:
            raise ValueError(
                "Argument `num_thresholds` must be an integer > 0. "
                f"Received: num_thresholds={num_thresholds}"
            )
        self.value = value
        self.class_id = class_id

        # Build `num_thresholds` thresholds spanning [0, 1]: the endpoints
        # plus an evenly spaced interior grid (or just 0.5 for a single
        # threshold).
        if num_thresholds == 1:
            self.thresholds = [0.5]
            self._thresholds_distributed_evenly = False
        else:
            interior = [
                (i + 1) * 1.0 / (num_thresholds - 1)
                for i in range(num_thresholds - 2)
            ]
            self.thresholds = [0.0] + interior + [1.0]
            self._thresholds_distributed_evenly = True

        # Full confusion matrix, one slot per threshold.
        num_slots = len(self.thresholds)
        self.true_positives = self.add_variable(
            shape=(num_slots,),
            initializer=initializers.Zeros(),
            name="true_positives",
        )
        self.false_positives = self.add_variable(
            shape=(num_slots,),
            initializer=initializers.Zeros(),
            name="false_positives",
        )
        self.true_negatives = self.add_variable(
            shape=(num_slots,),
            initializer=initializers.Zeros(),
            name="true_negatives",
        )
        self.false_negatives = self.add_variable(
            shape=(num_slots,),
            initializer=initializers.Zeros(),
            name="false_negatives",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to
                `1`. Can be a tensor whose rank is either 0, or the same
                rank as `y_true`, and must be broadcastable to `y_true`.
        """
        variables_to_update = {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,  # noqa: E501
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,  # noqa: E501
        }
        metrics_utils.update_confusion_matrix_variables(
            variables_to_update,
            y_true,
            y_pred,
            thresholds=self.thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            class_id=self.class_id,
            sample_weight=sample_weight,
        )

    def reset_state(self):
        """Zero out all four confusion-matrix accumulators."""
        zeros = ops.zeros((len(self.thresholds),))
        for accumulator in (
            self.true_positives,
            self.false_positives,
            self.true_negatives,
            self.false_negatives,
        ):
            accumulator.assign(zeros)

    def get_config(self):
        """Return constructor arguments shared by all subclasses."""
        base_config = super().get_config()
        return {**base_config, "class_id": self.class_id}

    def _find_max_under_constraint(self, constrained, dependent, predicate):
        """Returns the maximum of `dependent` where the constraint holds.

        Args:
            constrained: Over these values the constraint is specified. A
                rank-1 tensor.
            dependent: From these values the maximum that satisfies the
                constraint is selected. Values in this tensor and in
                `constrained` are linked by having the same threshold at
                each position, hence this tensor must have the same shape.
            predicate: A binary boolean functor to be applied to arguments
                `constrained` and `self.value`, e.g. `ops.greater`.

        Returns:
            The maximal dependent value; 0.0 if no value satisfies the
            constraint.
        """
        feasible = predicate(constrained, self.value)
        masked = ops.multiply(dependent, ops.cast(feasible, dependent.dtype))
        # If nothing is feasible, every entry is masked to zero and the max
        # (with `initial=0`) degrades gracefully to 0.0.
        return ops.max(masked, initial=0)
@keras_export("keras.metrics.SensitivityAtSpecificity")
class SensitivityAtSpecificity(SensitivitySpecificityBase):
    """Computes best sensitivity where specificity is >= specified value.

    `Sensitivity` measures the proportion of actual positives that are
    correctly identified as such `(tp / (tp + fn))`.
    `Specificity` measures the proportion of actual negatives that are
    correctly identified as such `(tn / (tn + fp))`.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used
    to compute the sensitivity at the given specificity. The threshold for
    the given specificity value is computed and used to evaluate the
    corresponding sensitivity.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, the confusion-matrix statistics are
    restricted to that class, treating the task as a binary classification
    problem for `class_id`.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

    Args:
        specificity: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds
            to use for matching the given specificity.
        class_id: (Optional) Integer class ID for which we want binary
            metrics. This must be in the half-open interval
            `[0, num_classes)`, where `num_classes` is the last dimension of
            predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SensitivityAtSpecificity(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[1, 1, 2, 2, 1])
    >>> m.result()
    0.333333

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.SensitivityAtSpecificity(specificity=0.5)])
    ```
    """

    def __init__(
        self,
        specificity,
        num_thresholds=200,
        class_id=None,
        name=None,
        dtype=None,
    ):
        # Validate the target before the base class allocates any state.
        if specificity < 0 or specificity > 1:
            raise ValueError(
                "Argument `specificity` must be in the range [0, 1]. "
                f"Received: specificity={specificity}"
            )
        self.specificity = specificity
        self.num_thresholds = num_thresholds
        super().__init__(
            specificity,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype,
        )

    def result(self):
        """Best sensitivity among thresholds whose specificity meets the
        requested minimum."""
        sensitivities = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_negatives),
        )
        specificities = ops.divide_no_nan(
            self.true_negatives,
            ops.add(self.true_negatives, self.false_positives),
        )
        # Constrain on specificity >= target, maximize sensitivity.
        return self._find_max_under_constraint(
            specificities, sensitivities, ops.greater_equal
        )

    def get_config(self):
        """Return constructor arguments needed to re-create this metric."""
        config = {
            "num_thresholds": self.num_thresholds,
            "specificity": self.specificity,
        }
        base_config = super().get_config()
        return {**base_config, **config}
@keras_export("keras.metrics.SpecificityAtSensitivity")
class SpecificityAtSensitivity(SensitivitySpecificityBase):
"""Computes best specificity where sensitivity is >= specified value.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such `(tp / (tp + fn))`.
`Specificity` measures the proportion of actual negatives that are correctly
identified as such `(tn / (tn + fp))`.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the specificity at the given sensitivity. The threshold for the
given sensitivity value is computed and used to evaluate the corresponding
specificity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold
predictions, and computing the fraction of them for which `class_id` is
indeed a correct label.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
Args:
sensitivity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given sensitivity.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.SpecificityAtSensitivity(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result()
0.66666667
>>> m.reset_state()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[1, 1, 2, 2, 2])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.SpecificityAtSensitivity(sensitivity=0.3)])
```
"""
def __init__(
self,
sensitivity,
num_thresholds=200,
class_id=None,
name=None,
dtype=None,
):
if sensitivity < 0 or sensitivity > 1:
raise ValueError(
"Argument `sensitivity` must be in the range [0, 1]. "
f"Received: sensitivity={sensitivity}"
)
self.sensitivity = sensitivity
self.num_thresholds = num_thresholds
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/metric_test.py | keras/src/metrics/metric_test.py | import pickle
import numpy as np
from keras.src import backend
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import metrics as metrics_module
from keras.src import ops
from keras.src import testing
from keras.src.metrics.metric import Metric
class ExampleMetric(Metric):
    """Minimal mean-squared-error metric used as a fixture by these tests."""

    def __init__(self, name="mean_square_error", dtype=None):
        super().__init__(name=name, dtype=dtype)
        # Running sum of squared errors.
        self.sum = self.add_variable(
            name="sum", shape=(), initializer=initializers.Zeros()
        )
        # Number of samples seen so far.
        self.total = self.add_variable(
            name="total",
            shape=(),
            initializer=initializers.Zeros(),
            dtype="int32",
        )

    def update_state(self, y_true, y_pred):
        y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
        # Accumulate this batch's squared error and its sample count.
        squared_error = ops.sum((y_true - y_pred) ** 2)
        self.sum.assign(self.sum + squared_error)
        batch_size = ops.shape(y_true)[0]
        self.total.assign(self.total + batch_size)

    def result(self):
        total_error = ops.cast(self.sum, dtype=self.dtype)
        count = ops.cast(self.total, dtype=self.dtype)
        # Epsilon guards against division by zero before any update.
        eps = ops.cast(backend.epsilon(), dtype=self.dtype)
        return total_error / (count + eps)

    def reset_state(self):
        self.sum.assign(0)
        self.total.assign(0)
class MetricTest(testing.TestCase):
    """Tests for base `Metric` behaviors: state, tracking, serialization,
    dtype handling."""

    def setUp(self):
        # Snapshot global dtype state so each test can restore it.
        self._global_dtype_policy = dtype_policies.dtype_policy.dtype_policy()
        self._floatx = backend.floatx()
        return super().setUp()

    def tearDown(self):
        dtype_policies.dtype_policy.set_dtype_policy(self._global_dtype_policy)
        backend.set_floatx(self._floatx)
        return super().tearDown()

    def test_end_to_end_flow(self):
        """Update in batches, check the result, then reset."""
        metric = ExampleMetric(name="mse")
        self.assertEqual(metric.name, "mse")
        self.assertEqual(len(metric.variables), 2)

        num_samples = 20
        y_true = np.random.random((num_samples, 3))
        y_pred = np.random.random((num_samples, 3))
        batch_size = 8
        for b in range(0, num_samples // batch_size + 1):
            y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
            y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
            metric.update_state(y_true_batch, y_pred_batch)
        self.assertAllClose(metric.total, 20)
        result = metric.result()
        self.assertAllClose(
            result, np.sum((y_true - y_pred) ** 2) / num_samples
        )
        metric.reset_state()
        self.assertEqual(metric.result(), 0.0)

    def test_stateless_update_state(self):
        """Stateless updates return new values without mutating variables."""
        metric = ExampleMetric(name="mse")
        self.assertEqual(len(metric.variables), 2)
        original_variable_values = (
            metric.variables[0].numpy(),
            metric.variables[1].numpy(),
        )

        num_samples = 20
        y_true = np.random.random((num_samples, 3))
        y_pred = np.random.random((num_samples, 3))
        batch_size = 8
        metric_variables = metric.variables
        for b in range(0, num_samples // batch_size + 1):
            y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
            y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
            metric_variables = metric.stateless_update_state(
                metric_variables, y_true_batch, y_pred_batch
            )
        # The metric's own variables must be untouched.
        self.assertAllClose(metric.variables[0], original_variable_values[0])
        self.assertAllClose(metric.variables[1], original_variable_values[1])

        # Writing the returned values back yields the stateful result.
        metric.variables[0].assign(metric_variables[0])
        metric.variables[1].assign(metric_variables[1])
        self.assertAllClose(metric.total, 20)
        result = metric.result()
        self.assertAllClose(
            result, np.sum((y_true - y_pred) ** 2) / num_samples
        )

        if backend.backend() == "jax":
            # Check no side effects: the stateless update must be traceable
            # under jit.
            import jax

            @jax.jit
            def update(metric_variables, y_true_batch, y_pred_batch):
                metric_variables = metric.stateless_update_state(
                    metric_variables, y_true_batch, y_pred_batch
                )

            update(metric_variables, y_true_batch, y_pred_batch)

    def test_stateless_result(self):
        """`stateless_result` computes from supplied values: 12 / 3 = 4."""
        metric = ExampleMetric(name="mse")
        res = metric.stateless_result([ops.ones(()) * 12, ops.ones(()) * 3])
        self.assertAllClose(res, 4.0)

    def test_stateless_reset_state(self):
        """Stateless reset returns zeroed values for every variable."""
        metric = ExampleMetric(name="mse")
        num_samples = 20
        y_true = np.random.random((num_samples, 3))
        y_pred = np.random.random((num_samples, 3))
        metric.update_state(y_true, y_pred)

        # Renamed from `vars` to avoid shadowing the builtin.
        reset_vars = metric.stateless_reset_state()
        self.assertLen(reset_vars, 2)
        self.assertEqual(reset_vars[0], 0)
        self.assertEqual(reset_vars[1], 0)

    def test_variable_tracking(self):
        """Variables attached in lists/dicts/nested structures are tracked."""
        # In list
        metric = ExampleMetric(name="mse")
        metric.more_vars = [backend.Variable(0.0), backend.Variable(1.0)]
        self.assertEqual(len(metric.variables), 4)

        # In dict
        metric = ExampleMetric(name="mse")
        metric.more_vars = {
            "a": backend.Variable(0.0),
            "b": backend.Variable(1.0),
        }
        self.assertEqual(len(metric.variables), 4)

        # In nested structured
        metric = ExampleMetric(name="mse")
        metric.more_vars = {"a": [backend.Variable(0.0), backend.Variable(1.0)]}
        self.assertEqual(len(metric.variables), 4)

    def test_submetric_tracking(self):
        """Variables of nested sub-metrics are aggregated recursively."""
        # Plain attr
        metric = ExampleMetric(name="mse")
        metric.submetric = ExampleMetric(name="submse")
        self.assertEqual(len(metric.variables), 4)

        # In list
        metric = ExampleMetric(name="mse")
        metric.submetrics = [
            ExampleMetric(name="submse1"),
            ExampleMetric(name="submse2"),
        ]
        self.assertEqual(len(metric.variables), 6)

        # In dict
        metric = ExampleMetric(name="mse")
        metric.submetrics = {
            "1": ExampleMetric(name="submse1"),
            "2": ExampleMetric(name="submse2"),
        }
        self.assertEqual(len(metric.variables), 6)

        # Two levels deep
        metric = ExampleMetric(name="mse")
        metric.submetric = ExampleMetric(name="submse")
        metric.submetric.submetric = ExampleMetric(name="subsubmse")
        self.assertEqual(len(metric.variables), 6)

    def test_serialization(self):
        """The metric round-trips through the serialization test harness."""
        self.run_class_serialization_test(
            ExampleMetric(name="mse"),
            custom_objects={"ExampleMetric": ExampleMetric},
        )

    def test_pickle(self):
        """Metrics obtained via `get` survive pickling."""
        metric = metrics_module.get("mse")
        reloaded = pickle.loads(pickle.dumps(metric))
        self.assertIsInstance(reloaded, metrics_module.MeanSquaredError)

    def test_get_method(self):
        """`metrics.get` resolves aliases, snake_case names, and None."""
        metric = metrics_module.get("mse")
        self.assertIsInstance(metric, metrics_module.MeanSquaredError)

        metric = metrics_module.get("mean_squared_error")
        self.assertIsInstance(metric, metrics_module.MeanSquaredError)

        metric = metrics_module.get("categorical_accuracy")
        self.assertIsInstance(metric, metrics_module.CategoricalAccuracy)

        metric = metrics_module.get(None)
        self.assertEqual(metric, None)

        with self.assertRaises(ValueError):
            metrics_module.get("typo")

    def test_dtype_arg(self):
        """The `dtype` argument (string or DTypePolicy) sets result dtype."""
        metric = ExampleMetric(name="mse", dtype="float16")
        self.assertEqual(metric.name, "mse")
        self.assertEqual(len(metric.variables), 2)

        num_samples = 10
        y_true = np.random.random((num_samples, 3))
        y_pred = np.random.random((num_samples, 3))
        metric.update_state(y_true, y_pred)
        result = metric.result()
        self.assertAllClose(
            result, np.sum((y_true - y_pred) ** 2) / num_samples, atol=1e-3
        )
        self.assertDType(result, "float16")

        # Test DTypePolicy for `dtype` argument
        metric = ExampleMetric(
            dtype=dtype_policies.DTypePolicy("mixed_float16")
        )
        # Two updates double both sum and count, so the mean is unchanged.
        metric.update_state(y_true, y_pred)
        metric.update_state(y_true, y_pred)
        result = metric.result()
        self.assertAllClose(
            result, np.sum((y_true - y_pred) ** 2) / num_samples, atol=1e-3
        )
        self.assertDType(result, "float16")

        # `dtype` setter should raise AttributeError
        with self.assertRaises(AttributeError):
            metric.dtype = "bfloat16"

    def test_default_dtype(self):
        """With no `dtype`, the metric follows `floatx()`, not the global
        dtype policy."""
        y_true = np.random.random((10, 3))
        y_pred = np.random.random((10, 3))

        # Defaults to `keras.config.floatx()` not global `dtype_policy`
        dtype_policies.dtype_policy.set_dtype_policy("mixed_float16")
        metric = ExampleMetric()
        metric.update_state(y_true, y_pred)
        result = metric.result()
        self.assertDType(result, "float32")

        backend.set_floatx("float16")
        metric = ExampleMetric()
        metric.update_state(y_true, y_pred)
        result = metric.result()
        self.assertDType(result, backend.floatx())
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/__init__.py | keras/src/metrics/__init__.py | import inspect
from keras.src.api_export import keras_export
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.correlation_metrics import ConcordanceCorrelation
from keras.src.metrics.correlation_metrics import PearsonCorrelation
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
# Every built-in metric class exported by this module; used to build the
# name-to-class lookup table below.
ALL_OBJECTS = {
    # Base
    Metric,
    Mean,
    Sum,
    MeanMetricWrapper,
    # Regression
    MeanSquaredError,
    RootMeanSquaredError,
    MeanAbsoluteError,
    MeanAbsolutePercentageError,
    MeanSquaredLogarithmicError,
    CosineSimilarity,
    LogCoshError,
    R2Score,
    # Classification
    AUC,
    FalseNegatives,
    FalsePositives,
    Precision,
    PrecisionAtRecall,
    Recall,
    RecallAtPrecision,
    SensitivityAtSpecificity,
    SpecificityAtSensitivity,
    TrueNegatives,
    TruePositives,
    # Correlation
    ConcordanceCorrelation,
    PearsonCorrelation,
    # Hinge
    Hinge,
    SquaredHinge,
    CategoricalHinge,
    # Probabilistic
    KLDivergence,
    Poisson,
    BinaryCrossentropy,
    CategoricalCrossentropy,
    SparseCategoricalCrossentropy,
    # Accuracy
    Accuracy,
    BinaryAccuracy,
    CategoricalAccuracy,
    SparseCategoricalAccuracy,
    TopKCategoricalAccuracy,
    SparseTopKCategoricalAccuracy,
    # F-Score
    F1Score,
    FBetaScore,
    # IoU
    IoU,
    BinaryIoU,
    MeanIoU,
    OneHotIoU,
    OneHotMeanIoU,
}
# Lookup table used by `get`/`deserialize`, keyed by both the `ClassName`
# and the `snake_case` spelling of every metric.
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# TODO: Align with `tf.keras` and set the name attribute of metrics
# with the key name. Currently it uses default name of class definitions.
# Common abbreviations accepted as identifiers (both cases).
ALL_OBJECTS_DICT.update(
    {
        "bce": BinaryCrossentropy,
        "BCE": BinaryCrossentropy,
        "mse": MeanSquaredError,
        "MSE": MeanSquaredError,
        "mae": MeanAbsoluteError,
        "MAE": MeanAbsoluteError,
        "mape": MeanAbsolutePercentageError,
        "MAPE": MeanAbsolutePercentageError,
        "msle": MeanSquaredLogarithmicError,
        "MSLE": MeanSquaredLogarithmicError,
    }
)
@keras_export("keras.metrics.serialize")
def serialize(metric):
    """Serializes metric function or `Metric` instance.

    Args:
        metric: A Keras `Metric` instance or a metric function.

    Returns:
        Metric configuration dictionary.
    """
    serialized = serialization_lib.serialize_keras_object(metric)
    return serialized
@keras_export("keras.metrics.deserialize")
def deserialize(config, custom_objects=None):
    """Deserializes a serialized metric class/function instance.

    Args:
        config: Metric configuration.
        custom_objects: Optional dictionary mapping names (strings)
            to custom objects (classes and functions) to be
            considered during deserialization.

    Returns:
        A Keras `Metric` instance or a metric function.
    """
    # Built-in metrics are resolved through the module-level lookup table;
    # anything else must come from `custom_objects`.
    lookup_kwargs = {
        "module_objects": ALL_OBJECTS_DICT,
        "custom_objects": custom_objects,
    }
    return serialization_lib.deserialize_keras_object(config, **lookup_kwargs)
@keras_export("keras.metrics.get")
def get(identifier):
    """Retrieves a Keras metric as a `function`/`Metric` class instance.

    The `identifier` may be the string name of a metric function or class.

    >>> metric = metrics.get("categorical_crossentropy")
    >>> type(metric)
    <class 'function'>
    >>> metric = metrics.get("CategoricalCrossentropy")
    >>> type(metric)
    <class '...metrics.CategoricalCrossentropy'>

    You can also specify `config` of the metric to this function by passing
    dict containing `class_name` and `config` as an identifier. Also note
    that the `class_name` must map to a `Metric` class

    >>> identifier = {"class_name": "CategoricalCrossentropy",
    ...               "config": {"from_logits": True}}
    >>> metric = metrics.get(identifier)
    >>> type(metric)
    <class '...metrics.CategoricalCrossentropy'>

    Args:
        identifier: A metric identifier. One of None or string name of a
            metric function/class or metric configuration dictionary or a
            metric function or a metric class instance

    Returns:
        A Keras metric as a `function`/ `Metric` class instance.
    """
    if identifier is None:
        return None

    # Resolve the identifier to a callable: dicts go through full
    # deserialization, strings through the lookup table, everything else is
    # assumed to already be a function/class/instance.
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        obj = identifier

    if not callable(obj):
        raise ValueError(
            f"Could not interpret metric identifier: {identifier}"
        )
    # Classes are instantiated with default arguments; functions and
    # instances are returned as-is.
    return obj() if inspect.isclass(obj) else obj
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/f_score_metrics_test.py | keras/src/metrics/f_score_metrics_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.metrics import f_score_metrics
class FBetaScoreTest(testing.TestCase):
def _run_test(
self,
y_true,
y_pred,
sample_weights,
average,
beta,
threshold,
reference_result,
):
fbeta = f_score_metrics.FBetaScore(
average, beta, threshold, dtype="float32"
)
fbeta.update_state(y_true, y_pred, sample_weights)
result = fbeta.result()
self.assertAllClose(result, reference_result, atol=1e-6)
def test_config(self):
fbeta_obj = f_score_metrics.FBetaScore(
beta=0.5, threshold=0.3, average=None, dtype="float32"
)
self.assertEqual(fbeta_obj.beta, 0.5)
self.assertEqual(fbeta_obj.average, None)
self.assertEqual(fbeta_obj.threshold, 0.3)
self.assertEqual(fbeta_obj.dtype, "float32")
# Check save and restore config
fbeta_obj2 = f_score_metrics.FBetaScore.from_config(
fbeta_obj.get_config()
)
self.assertEqual(fbeta_obj2.beta, 0.5)
self.assertEqual(fbeta_obj2.average, None)
self.assertEqual(fbeta_obj2.threshold, 0.3)
self.assertEqual(fbeta_obj2.dtype, "float32")
    @parameterized.parameters(
        # (average, beta)
        ("micro", 0.5),
        ("micro", 1.0),
        ("micro", 2.0),
        ("macro", 0.5),
        ("macro", 1.0),
        ("macro", 2.0),
        ("weighted", 0.5),
        ("weighted", 1.0),
        ("weighted", 2.0),
    )
    def test_fbeta_perfect_score(self, average, beta):
        """Binarized predictions (threshold 0.66) equal the labels, so every
        averaging mode and beta yields a score of 1.0."""
        y_true = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]
        y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
        self._run_test(
            y_true,
            y_pred,
            None,
            average=average,
            beta=beta,
            threshold=0.66,
            reference_result=1.0,
        )
    @parameterized.parameters(
        # (average, beta)
        ("micro", 0.5),
        ("micro", 1.0),
        ("micro", 2.0),
        ("macro", 0.5),
        ("macro", 1.0),
        ("macro", 2.0),
        ("weighted", 0.5),
        ("weighted", 1.0),
        ("weighted", 2.0),
    )
    def test_fbeta_worst_score(self, average, beta):
        """Binarized predictions (threshold 0.66) share no positives with the
        labels, so every averaging mode and beta yields 0.0."""
        y_true = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
        y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
        self._run_test(
            y_true,
            y_pred,
            None,
            average=average,
            beta=beta,
            threshold=0.66,
            reference_result=0.0,
        )
    @parameterized.parameters(
        # average, beta, result
        (None, 0.5, [0.71428573, 0.5, 0.833334]),
        (None, 1.0, [0.8, 0.5, 0.6666667]),
        (None, 2.0, [0.9090904, 0.5, 0.555556]),
        ("micro", 0.5, 0.6666667),
        ("micro", 1.0, 0.6666667),
        ("micro", 2.0, 0.6666667),
        ("macro", 0.5, 0.6825397),
        ("macro", 1.0, 0.6555555),
        ("macro", 2.0, 0.6548822),
        ("weighted", 0.5, 0.6825397),
        ("weighted", 1.0, 0.6555555),
        ("weighted", 2.0, 0.6548822),
    )
    def test_fbeta_random_score(self, average, beta, result):
        """Mixed hits and misses at threshold 0.66; reference values are
        precomputed per (average, beta). `average=None` expects one score
        per class."""
        y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
        y_true = [[0, 0, 1], [1, 1, 0], [1, 1, 1]]
        self._run_test(
            y_true,
            y_pred,
            None,
            average=average,
            beta=beta,
            threshold=0.66,
            reference_result=result,
        )
    @parameterized.parameters(
        # average, beta, result
        (None, 0.5, [0.9090904, 0.555556, 1.0]),
        (None, 1.0, [0.8, 0.6666667, 1.0]),
        (None, 2.0, [0.71428573, 0.833334, 1.0]),
        ("micro", 0.5, 0.833334),
        ("micro", 1.0, 0.833334),
        ("micro", 2.0, 0.833334),
        ("macro", 0.5, 0.821549),
        ("macro", 1.0, 0.822222),
        ("macro", 2.0, 0.849206),
        ("weighted", 0.5, 0.880471),
        ("weighted", 1.0, 0.844445),
        ("weighted", 2.0, 0.829365),
    )
    def test_fbeta_random_score_none(self, average, beta, result):
        """Exercises the `threshold=None` code path of FBetaScore on one-hot
        labels; reference values are precomputed per (average, beta)."""
        y_true = [
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 1],
        ]
        y_pred = [
            [0.9, 0.1, 0],
            [0.2, 0.6, 0.2],
            [0, 0, 1],
            [0.4, 0.3, 0.3],
            [0, 0.9, 0.1],
            [0, 0, 1],
        ]
        self._run_test(
            y_true,
            y_pred,
            None,
            average=average,
            beta=beta,
            threshold=None,
            reference_result=result,
        )
    @parameterized.parameters(
        # average, beta, sample_weights, result
        (None, 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.909091, 0.555556, 1.0]),
        (None, 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.9375, 0.714286, 1.0]),
        (None, 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.8, 0.666667, 1.0]),
        (None, 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.857143, 0.8, 1.0]),
        (None, 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.714286, 0.833333, 1.0]),
        (None, 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.789474, 0.909091, 1.0]),
        ("micro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("micro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("micro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("macro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.821549),
        ("macro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.883929),
        ("macro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.822222),
        ("macro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.885714),
        ("macro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.849206),
        ("macro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.899522),
        ("weighted", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.880471),
        ("weighted", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.917857),
        ("weighted", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.844444),
        ("weighted", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.902857),
        ("weighted", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.829365),
        ("weighted", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.897608),
    )
    def test_fbeta_weighted_random_score_none(
        self, average, beta, sample_weights, result
    ):
        """Checks FBetaScore with per-sample weights and `threshold=None`
        against precomputed reference values. The reference rows with
        zero weights match scores computed with those samples excluded."""
        y_true = [
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 1],
        ]
        y_pred = [
            [0.9, 0.1, 0],
            [0.2, 0.6, 0.2],
            [0, 0, 1],
            [0.4, 0.3, 0.3],
            [0, 0.9, 0.1],
            [0, 0, 1],
        ]
        self._run_test(
            y_true,
            y_pred,
            sample_weights,
            average=average,
            beta=beta,
            threshold=None,
            reference_result=result,
        )
def test_invalid_average_raises_value_error(self):
expected_message = (
"Invalid `average` argument value. Expected one of: "
r"\{None, 'micro', 'macro', 'weighted'\}. "
"Received: average=invalid_average"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="invalid_average",
beta=1.0,
threshold=None,
dtype="float32",
)
def test_beta_integer_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta=1, threshold=None, dtype="float32"
)
def test_beta_string_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta="1.0", threshold=None, dtype="float32"
)
def test_beta_none_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta=None, threshold=None, dtype="float32"
)
def test_beta_zero_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=0.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=0.0, threshold=None, dtype="float32"
)
def test_beta_negative_one_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=-1.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=-1.0, threshold=None, dtype="float32"
)
def test_beta_negative_half_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=-0.5"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=-0.5, threshold=None, dtype="float32"
)
def test_threshold_not_float_raises_value_error(self):
expected_message_pattern = (
"Invalid `threshold` argument value. "
"It should be a Python float. "
"Received: threshold=1 of type '<class 'int'>'"
)
with self.assertRaisesRegex(ValueError, expected_message_pattern):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=1, dtype="float32"
)
def test_threshold_string_raises_value_error(self):
expected_message_pattern = (
"Invalid `threshold` argument value. "
"It should be a Python float. "
"Received: threshold=0.5 of type '<class 'str'>'"
)
with self.assertRaisesRegex(ValueError, expected_message_pattern):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold="0.5", dtype="float32"
)
def test_threshold_above_one_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=1.1"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=1.1, dtype="float32"
)
def test_threshold_zero_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=0.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=0.0, dtype="float32"
)
def test_threshold_negative_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=-0.5"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=-0.5, dtype="float32"
)
def test_non_2d_input_shapes_raises_value_error(self):
fbeta = f_score_metrics.FBetaScore(beta=1.0, dtype="float32")
y_true_shape = (2, 3, 4)
y_pred_shape = (2, 3, 4)
expected_error_message = (
"FBetaScore expects 2D inputs with shape "
r"\(batch_size, output_dim\)\. Received input "
r"shapes: y_pred\.shape=\(2, 3, 4\) and "
r"y_true\.shape=\(2, 3, 4\)\."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
fbeta._build(y_true_shape, y_pred_shape)
def test_undefined_output_dim_raises_value_error(self):
fbeta = f_score_metrics.FBetaScore(beta=1.0, dtype="float32")
y_true_shape = (2, None)
y_pred_shape = (2, None)
expected_error_message = (
"FBetaScore expects 2D inputs with shape "
r"\(batch_size, output_dim\), with output_dim fully "
r"defined \(not None\)\. Received input "
r"shapes: y_pred\.shape=\(2, None\) and "
r"y_true\.shape=\(2, None\)\."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
fbeta._build(y_true_shape, y_pred_shape)
class F1ScoreTest(testing.TestCase):
    def test_config(self):
        """`F1Score` serializes without a `beta` entry and round-trips."""
        metric = f_score_metrics.F1Score(dtype="float32")
        config = metric.get_config()
        self.assertNotIn("beta", config)

        # Round-trip through from_config and verify restored attributes.
        restored = f_score_metrics.F1Score.from_config(config)
        self.assertEqual(restored.average, None)
        self.assertEqual(restored.dtype, "float32")

    def test_correctness(self):
        """`F1Score` must agree with `FBetaScore(beta=1.0)`."""
        y_true = np.array(
            [
                [1, 0, 0],
                [0, 1, 0],
                [0, 0, 1],
                [1, 0, 0],
                [1, 0, 0],
                [0, 0, 1],
            ]
        )
        y_pred = np.array(
            [
                [0.9, 0.1, 0],
                [0.2, 0.6, 0.2],
                [0, 0, 1],
                [0.4, 0.3, 0.3],
                [0, 0.9, 0.1],
                [0, 0, 1],
            ]
        )
        f1 = f_score_metrics.F1Score()
        fbeta = f_score_metrics.FBetaScore(beta=1.0)
        f1.update_state(y_true, y_pred)
        fbeta.update_state(y_true, y_pred)
        self.assertAllClose(fbeta.result(), f1.result(), atol=1e-6)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/iou_metrics.py | keras/src/metrics/iou_metrics.py | import warnings
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics.metric import Metric
from keras.src.metrics.metrics_utils import confusion_matrix
class _IoUBase(Metric):
    """Computes the confusion matrix for Intersection-Over-Union metrics.

    Formula:

    ```python
    iou = true_positives / (true_positives + false_positives + false_negatives)
    ```

    Intersection-Over-Union is a common evaluation metric for semantic image
    segmentation.

    From IoUs of individual classes, the MeanIoU can be computed as the mean of
    the individual IoUs.

    To compute IoUs, the predictions are accumulated in a confusion matrix,
    weighted by `sample_weight` and the metric is then calculated from it.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        num_classes: The possible number of labels the prediction task can have.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        ignore_class: Optional integer. The ID of a class to be ignored during
            metric computation. This is useful, for example, in segmentation
            problems featuring a "void" class (commonly -1 or 255) in
            segmentation maps. By default (`ignore_class=None`), all classes are
            considered.
        sparse_y_true: Whether labels are encoded using integers or
            dense floating point vectors. If `False`, the `argmax` function
            is used to determine each sample's most likely associated label.
        sparse_y_pred: Whether predictions are encoded using integers or
            dense floating point vectors. If `False`, the `argmax` function
            is used to determine each sample's most likely associated label.
        axis: (Optional) -1 is the dimension containing the logits.
            Defaults to `-1`.
    """

    def __init__(
        self,
        num_classes,
        name=None,
        dtype=None,
        ignore_class=None,
        sparse_y_true=True,
        sparse_y_pred=True,
        axis=-1,
    ):
        # defaulting to int to avoid issues with confusion matrix
        super().__init__(name=name, dtype=dtype or "int")
        # Metric should be maximized during optimization.
        self._direction = "up"
        self.num_classes = num_classes
        self.ignore_class = ignore_class
        self.sparse_y_true = sparse_y_true
        self.sparse_y_pred = sparse_y_pred
        self.axis = axis
        # Running confusion matrix, accumulated across `update_state` calls.
        self.total_cm = self.add_variable(
            name="total_confusion_matrix",
            shape=(num_classes, num_classes),
            initializer=initializers.Zeros(),
            dtype=self.dtype,
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a `Tensor` whose rank is either 0, or the same as `y_true`,
                and must be broadcastable to `y_true`. Defaults to `1`.

        Returns:
            Update op.
        """
        # Decode dense/probability inputs to integer labels when requested.
        if not self.sparse_y_true:
            y_true = ops.argmax(y_true, axis=self.axis)
        if not self.sparse_y_pred:
            y_pred = ops.argmax(y_pred, axis=self.axis)

        y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)

        # Flatten the input if its rank > 1.
        if len(y_pred.shape) > 1:
            y_pred = ops.reshape(y_pred, [-1])
        if len(y_true.shape) > 1:
            y_true = ops.reshape(y_true, [-1])

        if sample_weight is None:
            sample_weight = 1
        else:
            # Float weights cast to an integer metric dtype get truncated,
            # silently distorting the accumulated counts — warn the user.
            if (
                hasattr(sample_weight, "dtype")
                and "float" in str(sample_weight.dtype)
                and "int" in str(self.dtype)
            ):
                warnings.warn(
                    "You are passing weight as `float`, but dtype is `int`. "
                    "This may result in an incorrect weight due to type casting"
                    " Consider using integer weights."
                )

        sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)
        if len(sample_weight.shape) > 1:
            sample_weight = ops.reshape(sample_weight, [-1])
        sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))

        if self.ignore_class is not None:
            ignore_class = ops.convert_to_tensor(
                self.ignore_class, y_true.dtype
            )
            valid_mask = ops.not_equal(y_true, ignore_class)
            # Ignored positions are zeroed out rather than removed; their
            # sample weight is zeroed too, so they don't affect the matrix.
            y_true = y_true * ops.cast(valid_mask, y_true.dtype)
            y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype)

            if sample_weight is not None:
                sample_weight = sample_weight * ops.cast(
                    valid_mask, sample_weight.dtype
                )

        y_pred = ops.cast(y_pred, dtype=self.dtype)
        y_true = ops.cast(y_true, dtype=self.dtype)
        sample_weight = ops.cast(sample_weight, dtype=self.dtype)

        current_cm = confusion_matrix(
            y_true,
            y_pred,
            self.num_classes,
            weights=sample_weight,
            dtype=self.dtype,
        )

        return self.total_cm.assign(self.total_cm + current_cm)

    def reset_state(self):
        # Clear the accumulated confusion matrix back to all zeros.
        self.total_cm.assign(
            ops.zeros(self.total_cm.shape, dtype=self.total_cm.dtype)
        )
@keras_export("keras.metrics.IoU")
class IoU(_IoUBase):
"""Computes the Intersection-Over-Union metric for specific target classes.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Note, this class first computes IoUs for all individual classes, then
returns the mean of IoUs for the classes that are specified by
`target_class_ids`. If `target_class_ids` has only one id value, the IoU of
that specific class is returned.
Args:
num_classes: The possible number of labels the prediction task can have.
target_class_ids: A tuple or list of target class ids for which the
metric is returned. To compute IoU for a specific class, a list
(or tuple) of a single id value should be provided.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) -1 is the dimension containing the logits.
Defaults to `-1`.
Examples:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # iou = [0.33, 0.33]
>>> m = keras.metrics.IoU(num_classes=2, target_class_ids=[0])
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> # cm = [[0.3, 0.3],
>>> # [0.3, 0.1]]
>>> # sum_row = [0.6, 0.4], sum_col = [0.6, 0.4],
>>> # true_positives = [0.3, 0.1]
>>> # iou = [0.33, 0.14]
>>> m.result()
0.33333334
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.IoU(num_classes=2, target_class_ids=[0])])
```
"""
def __init__(
self,
num_classes,
target_class_ids,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
super().__init__(
name=name,
num_classes=num_classes,
ignore_class=ignore_class,
sparse_y_true=sparse_y_true,
sparse_y_pred=sparse_y_pred,
axis=axis,
dtype=dtype,
)
if max(target_class_ids) >= num_classes:
raise ValueError(
f"Target class id {max(target_class_ids)} "
"is out of range, which is "
f"[{0}, {num_classes})."
)
self.target_class_ids = list(target_class_ids)
def result(self):
"""Compute the intersection-over-union via the confusion matrix."""
sum_over_row = ops.cast(
ops.sum(self.total_cm, axis=0), dtype=self.dtype
)
sum_over_col = ops.cast(
ops.sum(self.total_cm, axis=1), dtype=self.dtype
)
true_positives = ops.cast(ops.diag(self.total_cm), dtype=self.dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
target_class_ids = ops.convert_to_tensor(
self.target_class_ids, dtype="int32"
)
# Only keep the target classes
true_positives = ops.take_along_axis(
true_positives, target_class_ids, axis=-1
)
denominator = ops.take_along_axis(
denominator, target_class_ids, axis=-1
)
denominator = ops.cast(denominator, dtype="float32")
# If the denominator is 0, we need to ignore the class.
num_valid_entries = ops.sum(
ops.cast(ops.greater(denominator, 1e-9), dtype="float32")
)
iou = ops.divide(true_positives, denominator + backend.epsilon())
return ops.divide(
ops.sum(iou, axis=self.axis), num_valid_entries + backend.epsilon()
)
def get_config(self):
config = {
"num_classes": self.num_classes,
"target_class_ids": self.target_class_ids,
"ignore_class": self.ignore_class,
"sparse_y_true": self.sparse_y_true,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.metrics.BinaryIoU")
class BinaryIoU(IoU):
"""Computes the Intersection-Over-Union metric for class 0 and/or 1.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute IoUs for a binary classification task
where the predictions are provided as logits. First a `threshold` is applied
to the predicted values such that those that are below the `threshold` are
converted to class 0 and those that are above the `threshold` are converted
to class 1.
IoUs for classes 0 and 1 are then computed, the mean of IoUs for the classes
that are specified by `target_class_ids` is returned.
Note: with `threshold=0`, this metric has the same behavior as `IoU`.
Args:
target_class_ids: A tuple or list of target class ids for which the
metric is returned. Options are `[0]`, `[1]`, or `[0, 1]`. With
`[0]` (or `[1]`), the IoU metric for class 0 (or class 1,
respectively) is returned. With `[0, 1]`, the mean of IoUs for the
two classes is returned.
threshold: A threshold that applies to the prediction logits to convert
them to either predicted class 0 if the logit is below `threshold`
or predicted class 1 if the logit is above `threshold`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
>>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7],
... sample_weight=[0.2, 0.3, 0.4, 0.1])
>>> # cm = [[0.2, 0.4],
>>> # [0.3, 0.1]]
>>> # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
>>> # true_positives = [0.2, 0.1]
>>> # iou = [0.222, 0.125]
>>> m.result()
0.17361112
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.BinaryIoU(
target_class_ids=[0],
threshold=0.5
)]
)
```
"""
def __init__(
self,
target_class_ids=(0, 1),
threshold=0.5,
name=None,
dtype=None,
):
super().__init__(
num_classes=2,
target_class_ids=target_class_ids,
name=name,
dtype=dtype,
)
self.threshold = threshold
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Before the confusion matrix is updated, the predicted values are
thresholded to be:
0 for values that are smaller than the `threshold`
1 for values that are larger or equal to the `threshold`
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same as `y_true`,
and must be broadcastable to `y_true`. Defaults to `1`.
Returns:
Update op.
"""
y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
# convert y_pred on float 32 and cast just after to dtype
y_pred = ops.convert_to_tensor(y_pred, dtype="float32")
y_pred = ops.cast(y_pred >= self.threshold, self.dtype)
return super().update_state(y_true, y_pred, sample_weight)
def get_config(self):
return {
"target_class_ids": self.target_class_ids,
"threshold": self.threshold,
"name": self.name,
"dtype": self._dtype,
}
@keras_export("keras.metrics.MeanIoU")
class MeanIoU(IoU):
"""Computes the mean Intersection-Over-Union metric.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Note that this class first computes IoUs for all individual classes, then
returns the mean of these values.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Example:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = keras.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
target_class_ids = list(range(num_classes))
super().__init__(
name=name,
num_classes=num_classes,
target_class_ids=target_class_ids,
axis=axis,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=sparse_y_true,
sparse_y_pred=sparse_y_pred,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_true": self.sparse_y_true,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_export("keras.metrics.OneHotIoU")
class OneHotIoU(IoU):
"""Computes the Intersection-Over-Union metric for one-hot encoded labels.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute IoU for multi-class classification tasks
where the labels are one-hot encoded (the last axis should have one
dimension per class). Note that the predictions should also have the same
shape. To compute the IoU, first the labels and predictions are converted
back into integer format by taking the argmax over the class axis. Then the
same computation steps as for the base `IoU` class apply.
Note, if there is only one channel in the labels and predictions, this class
is the same as class `IoU`. In this case, use `IoU` instead.
Also, make sure that `num_classes` is equal to the number of classes in the
data, to avoid a "labels out of bound" error when the confusion matrix is
computed.
Args:
num_classes: The possible number of labels the prediction task can have.
target_class_ids: A tuple or list of target class ids for which the
metric is returned. To compute IoU for a specific class, a list
(or tuple) of a single id value should be provided.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Example:
>>> y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
... [0.1, 0.4, 0.5]])
>>> sample_weight = [0.1, 0.2, 0.3, 0.4]
>>> m = keras.metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
>>> m.update_state(
... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
>>> # cm = [[0, 0, 0.2+0.4],
>>> # [0.3, 0, 0],
>>> # [0, 0, 0.1]]
>>> # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
>>> # true_positives = [0, 0, 0.1]
>>> # single_iou = true_positives / (sum_row + sum_col - true_positives))
>>> # mean_iou = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
>>> m.result()
0.071
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.OneHotIoU(
num_classes=3,
target_class_id=[1]
)]
)
```
"""
def __init__(
self,
num_classes,
target_class_ids,
name=None,
dtype=None,
ignore_class=None,
sparse_y_pred=False,
axis=-1,
):
super().__init__(
num_classes=num_classes,
target_class_ids=target_class_ids,
name=name,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=False,
sparse_y_pred=sparse_y_pred,
axis=axis,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"target_class_ids": self.target_class_ids,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_export("keras.metrics.OneHotMeanIoU")
class OneHotMeanIoU(MeanIoU):
"""Computes mean Intersection-Over-Union metric for one-hot encoded labels.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute the mean IoU for multi-class
classification tasks where the labels are one-hot encoded (the last axis
should have one dimension per class). Note that the predictions should also
have the same shape. To compute the mean IoU, first the labels and
predictions are converted back into integer format by taking the argmax over
the class axis. Then the same computation steps as for the base `MeanIoU`
class apply.
Note, if there is only one channel in the labels and predictions, this class
is the same as class `MeanIoU`. In this case, use `MeanIoU` instead.
Also, make sure that `num_classes` is equal to the number of classes in the
data, to avoid a "labels out of bound" error when the confusion matrix is
computed.
Args:
num_classes: The possible number of labels the prediction task can have.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_pred: Whether predictions are encoded using natural numbers or
probability distribution vectors. If `False`, the `argmax`
function will be used to determine each sample's most likely
associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Example:
>>> y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
... [0.1, 0.4, 0.5]])
>>> sample_weight = [0.1, 0.2, 0.3, 0.4]
>>> m = keras.metrics.OneHotMeanIoU(num_classes=3)
>>> m.update_state(
... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
>>> # cm = [[0, 0, 0.2+0.4],
>>> # [0.3, 0, 0],
>>> # [0, 0, 0.1]]
>>> # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
>>> # true_positives = [0, 0, 0.1]
>>> # single_iou = true_positives / (sum_row + sum_col - true_positives))
>>> # mean_iou = (0 + 0 + 0.1 / (0.7 + 0.1 - 0.1)) / 3
>>> m.result()
0.048
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.OneHotMeanIoU(num_classes=3)])
```
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_pred=False,
axis=-1,
):
super().__init__(
num_classes=num_classes,
axis=axis,
name=name,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=False,
sparse_y_pred=sparse_y_pred,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/metric.py | keras/src/metrics/metric.py | from keras.src import backend
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils.naming import auto_name
from keras.src.utils.tracking import Tracker
@keras_export(["keras.Metric", "keras.metrics.Metric"])
class Metric(KerasSaveable):
    """Encapsulates metric logic and state.
    Args:
        name: Optional name for the metric instance.
        dtype: The dtype of the metric's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    Example:
    ```python
    m = SomeMetric(...)
    for input in ...:
        m.update_state(input)
    print('Final result: ', m.result())
    ```
    Usage with `compile()` API:
    ```python
    model = keras.Sequential()
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(10, activation='softmax'))
    model.compile(optimizer=keras.optimizers.RMSprop(0.01),
                  loss=keras.losses.CategoricalCrossentropy(),
                  metrics=[keras.metrics.CategoricalAccuracy()])
    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))
    model.fit(data, labels, epochs=10)
    ```
    To be implemented by subclasses:
    * `__init__()`: All state variables should be created in this method by
      calling `self.add_variable()` like: `self.var = self.add_variable(...)`
    * `update_state()`: Has all updates to the state variables like:
      `self.var.assign(...)`.
    * `result()`: Computes and returns a scalar value or a dict of scalar values
      for the metric from the state variables.
    Example subclass implementation:
    ```python
    class BinaryTruePositives(Metric):
        def __init__(self, name='binary_true_positives', **kwargs):
            super().__init__(name=name, **kwargs)
            self.true_positives = self.add_variable(
                shape=(),
                initializer='zeros',
                name='true_positives'
            )
        def update_state(self, y_true, y_pred, sample_weight=None):
            y_true = ops.cast(y_true, "bool")
            y_pred = ops.cast(y_pred, "bool")
            values = ops.logical_and(
                ops.equal(y_true, True), ops.equal(y_pred, True))
            values = ops.cast(values, self.dtype)
            if sample_weight is not None:
                sample_weight = ops.cast(sample_weight, self.dtype)
                sample_weight = ops.broadcast_to(
                    sample_weight, ops.shape(values)
                )
                values = ops.multiply(values, sample_weight)
            self.true_positives.assign(self.true_positives + ops.sum(values))
        def result(self):
            return self.true_positives
    ```
    """
    def __init__(self, dtype=None, name=None):
        # Auto-generate a unique name when none was given.
        self.name = name or auto_name(self.__class__.__name__)
        # `dtype` may be a dtype string or a `DTypePolicy`; computations use
        # the policy's compute dtype.
        self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
        self._dtype = self._dtype_policy.compute_dtype
        self._metrics = []
        self._variables = []
        # The tracker auto-registers variables and nested sub-metrics that
        # are assigned as attributes (see `__setattr__`).
        self._tracker = Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    self._variables,
                ),
                "metrics": (lambda x: isinstance(x, Metric), self._metrics),
            }
        )
    def reset_state(self):
        """Reset all of the metric state variables.
        This function is called between epochs/steps,
        when a metric is evaluated during training.
        """
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))
    def update_state(self, *args, **kwargs):
        """Accumulate statistics for the metric."""
        raise NotImplementedError
    def stateless_update_state(self, metric_variables, *args, **kwargs):
        """Functional variant of `update_state`.
        Runs `update_state` inside a stateless scope so `self` is never
        mutated; returns the new variable values instead.
        Args:
            metric_variables: List of tensors matching `self.variables` 1:1.
        Returns:
            List of updated variable values, in the same order as
            `self.variables`.
        """
        if len(metric_variables) != len(self.variables):
            raise ValueError(
                "Argument `metric_variables` must be a list of tensors "
                f"corresponding 1:1 to {self.__class__.__name__}().variables. "
                f"Received list with length {len(metric_variables)}, but "
                f"expected {len(self.variables)} variables."
            )
        # Gather variable mapping
        mapping = list(zip(self.variables, metric_variables))
        # Call in stateless scope
        with backend.StatelessScope(state_mapping=mapping) as scope:
            self.update_state(*args, **kwargs)
        # Gather updated variables
        metric_variables = []
        for v in self.variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                metric_variables.append(new_v)
            else:
                # Variable was not written during `update_state`; keep the
                # previous value.
                metric_variables.append(v)
        return metric_variables
    def result(self):
        """Compute the current metric value.
        Returns:
            A scalar tensor, or a dictionary of scalar tensors.
        """
        raise NotImplementedError
    def stateless_result(self, metric_variables):
        """Functional variant of `result`, evaluated on the given values."""
        if len(metric_variables) != len(self.variables):
            raise ValueError(
                "Argument `metric_variables` must be a list of tensors "
                f"corresponding 1:1 to {self.__class__.__name__}().variables. "
                f"Received list with length {len(metric_variables)}, but "
                f"expected {len(self.variables)} variables."
            )
        # Gather variable mapping
        mapping = list(zip(self.variables, metric_variables))
        # Call in stateless scope
        with backend.StatelessScope(state_mapping=mapping):
            res = self.result()
        return res
    def stateless_reset_state(self):
        """Functional variant of `reset_state`; returns the reset values."""
        # Call in stateless scope
        with backend.StatelessScope() as scope:
            self.reset_state()
        # Gather updated variables
        metric_variables = []
        for v in self.variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                metric_variables.append(new_v)
            else:
                metric_variables.append(v)
        return metric_variables
    @property
    def dtype(self):
        # Compute dtype used for all metric computations.
        return self._dtype
    def _obj_type(self):
        # Category tag used by the Keras saving machinery.
        return "Metric"
    def add_variable(
        self, shape, initializer, dtype=None, aggregation="sum", name=None
    ):
        """Create and track a new non-trainable state variable."""
        self._check_super_called()
        # "/" is replaced because it is not valid inside a name scope name.
        with backend.name_scope(self.name.replace("/", ">"), caller=self):
            initializer = initializers.get(initializer)
            variable = backend.Variable(
                initializer=initializer,
                shape=shape,
                dtype=dtype,
                trainable=False,
                aggregation=aggregation,
                synchronization="on_read",
                name=name,
            )
        # Prevent double-tracking
        self._tracker.add_to_store("variables", variable)
        return variable
    def add_weight(self, shape=(), initializer=None, dtype=None, name=None):
        # Backwards compatibility alias
        return self.add_variable(
            shape=shape, initializer=initializer, dtype=dtype, name=name
        )
    @property
    def variables(self):
        # Own variables first, then those of any nested sub-metrics.
        variables = list(self._variables)
        for metric in self._metrics:
            variables.extend(metric.variables)
        return variables
    def __call__(self, *args, **kwargs):
        # Convenience: update state then immediately return the new result.
        self._check_super_called()
        self.update_state(*args, **kwargs)
        return self.result()
    def get_config(self):
        """Return the serializable config of the metric."""
        return {"name": self.name, "dtype": self.dtype}
    @classmethod
    def from_config(cls, config):
        """Re-create a metric instance from its `get_config()` output."""
        return cls(**config)
    def __setattr__(self, name, value):
        # Track Variables, Layers, Metrics
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)
    def _check_super_called(self):
        # The tracker only exists once `Metric.__init__` has run; use that
        # as a sentinel to detect subclasses that skipped super().__init__().
        if not hasattr(self, "_tracker"):
            raise RuntimeError(
                "You forgot to call `super().__init__()` "
                "in the `__init__()` method. Go add it!"
            )
    def __repr__(self):
        return f"<{self.__class__.__name__} name={self.name}>"
    def __str__(self):
        return self.__repr__()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/regression_metrics.py | keras/src/metrics/regression_metrics.py | import warnings
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.metrics import reduction_metrics
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.metrics.MeanSquaredError")
class MeanSquaredError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean squared error between `y_true` and `y_pred`.
    Formula:
    ```python
    loss = mean(square(y_true - y_pred))
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Example:
    >>> m = keras.metrics.MeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25
    """
    def __init__(self, name="mean_squared_error", dtype=None):
        super().__init__(fn=mean_squared_error, name=name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def get_config(self):
        # `fn` is fixed by this class, so only `name` and `dtype` need to
        # be serialized.
        return dict(name=self.name, dtype=self.dtype)
@keras_export("keras.metrics.MeanAbsoluteError")
class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean absolute error between the labels and predictions.
    Formula:
    ```python
    loss = mean(abs(y_true - y_pred))
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Examples:
    >>> m = keras.metrics.MeanAbsoluteError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.5
    Usage with `compile()` API:
    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanAbsoluteError()])
    ```
    """
    def __init__(self, name="mean_absolute_error", dtype=None):
        super().__init__(fn=mean_absolute_error, name=name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def get_config(self):
        # `fn` is fixed by this class, so only `name` and `dtype` need to
        # be serialized.
        return dict(name=self.name, dtype=self.dtype)
@keras_export("keras.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):
    """Computes mean absolute percentage error between `y_true` and `y_pred`.
    Formula:
    ```python
    loss = 100 * mean(abs((y_true - y_pred) / y_true))
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Examples:
    >>> m = keras.metrics.MeanAbsolutePercentageError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    250000000.0
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    500000000.0
    Usage with `compile()` API:
    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanAbsolutePercentageError()])
    ```
    """
    def __init__(self, name="mean_absolute_percentage_error", dtype=None):
        super().__init__(fn=mean_absolute_percentage_error, name=name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def get_config(self):
        # `fn` is fixed by this class, so only `name` and `dtype` need to
        # be serialized.
        return dict(name=self.name, dtype=self.dtype)
@keras_export("keras.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper):
    """Computes mean squared logarithmic error between `y_true` and `y_pred`.
    Formula:
    ```python
    loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Examples:
    >>> m = keras.metrics.MeanSquaredLogarithmicError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.12011322
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.24022643
    Usage with `compile()` API:
    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanSquaredLogarithmicError()])
    ```
    """
    def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
        super().__init__(fn=mean_squared_logarithmic_error, name=name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def get_config(self):
        # `fn` is fixed by this class, so only `name` and `dtype` need to
        # be serialized.
        return dict(name=self.name, dtype=self.dtype)
@keras_export("keras.metrics.RootMeanSquaredError")
class RootMeanSquaredError(reduction_metrics.Mean):
    """Computes root mean squared error metric between `y_true` and `y_pred`.
    Formula:
    ```python
    loss = sqrt(mean((y_pred - y_true) ** 2))
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Examples:
    >>> m = keras.metrics.RootMeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.5
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.70710677
    Usage with `compile()` API:
    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.RootMeanSquaredError()])
    ```
    """
    def __init__(self, name="root_mean_squared_error", dtype=None):
        super().__init__(name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.
        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.
        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, self._dtype)
        y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
        squared_error = ops.square(y_pred - y_true)
        # The running state tracks the (weighted) mean of squared errors.
        return super().update_state(squared_error, sample_weight=sample_weight)
    def result(self):
        # Take the square root only at read time, over the accumulated mean.
        mean_of_squares = super().result()
        return ops.sqrt(mean_of_squares)
@keras_export("keras.metrics.CosineSimilarity")
class CosineSimilarity(reduction_metrics.MeanMetricWrapper):
    """Computes the cosine similarity between the labels and predictions.
    Formula:
    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```
    See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).
    This metric keeps the average cosine similarity between `predictions` and
    `labels` over a stream of data.
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        axis: (Optional) Defaults to `-1`. The dimension along which the cosine
            similarity is computed.
    Examples:
    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #        = ((0. + 0.) +  (0.5 + 0.5)) / 2
    >>> m = keras.metrics.CosineSimilarity(axis=1)
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
    >>> m.result()
    0.49999997
    >>> m.reset_state()
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
    ...                sample_weight=[0.3, 0.7])
    >>> m.result()
    0.6999999
    Usage with `compile()` API:
    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.CosineSimilarity(axis=1)])
    ```
    """
    def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
        super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)
        # Metric should be maximized during optimization.
        self._direction = "up"
        # Remember `axis` so that a get_config()/from_config() round trip
        # preserves it (previously it was dropped and silently reset to -1).
        self.axis = axis
    def get_config(self):
        # Serialize `axis` along with `name`/`dtype`; `fn` is fixed by the
        # class and deliberately not serialized.
        return {"name": self.name, "dtype": self.dtype, "axis": self.axis}
@keras_export("keras.metrics.LogCoshError")
class LogCoshError(reduction_metrics.MeanMetricWrapper):
    """Computes the logarithm of the hyperbolic cosine of the prediction error.
    Formula:
    ```python
    error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```
    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Examples:
    >>> m = keras.metrics.LogCoshError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.10844523
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.21689045
    Usage with `compile()` API:
    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.LogCoshError()])
    ```
    """
    def __init__(self, name="logcosh", dtype=None):
        super().__init__(fn=log_cosh, name=name, dtype=dtype)
        # Lower is better: optimization should drive this metric down.
        self._direction = "down"
    def get_config(self):
        # `fn` is fixed by this class, so only `name` and `dtype` need to
        # be serialized.
        return dict(name=self.name, dtype=self.dtype)
# Adapted from TF-Addons implementation (RSquare class).
@keras_export("keras.metrics.R2Score")
class R2Score(reduction_metrics.Metric):
    """Computes R2 score.
    Formula:
    ```python
    sum_squares_residuals = sum((y_true - y_pred) ** 2)
    sum_squares = sum((y_true - mean(y_true)) ** 2)
    R2 = 1 - sum_squares_residuals / sum_squares
    ```
    This is also called the
    [coefficient of determination](
    https://en.wikipedia.org/wiki/Coefficient_of_determination).
    It indicates how close the fitted regression line
    is to ground-truth data.
    - The highest score possible is 1.0. It indicates that the predictors
      perfectly accounts for variation in the target.
    - A score of 0.0 indicates that the predictors do not
      account for variation in the target.
    - It can also be negative if the model is worse than random.
    This metric can also compute the "Adjusted R2" score.
    Args:
        class_aggregation: Specifies how to aggregate scores corresponding to
            different output classes (or target dimensions),
            i.e. different dimensions on the last axis of the predictions.
            Equivalent to `multioutput` argument in Scikit-Learn.
            Should be one of
            `None` (no aggregation), `"uniform_average"`,
            `"variance_weighted_average"`.
        num_regressors: Number of independent regressors used
            ("Adjusted R2" score). 0 is the standard R2 score.
            Defaults to `0`.
        name: Optional. string name of the metric instance.
        dtype: Optional. data type of the metric result.
    Example:
    >>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
    >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
    >>> metric = keras.metrics.R2Score()
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result
    0.57142854
    """
    def __init__(
        self,
        class_aggregation="uniform_average",
        num_regressors=0,
        name="r2_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"
        valid_class_aggregation_values = (
            None,
            "uniform_average",
            "variance_weighted_average",
        )
        if class_aggregation not in valid_class_aggregation_values:
            raise ValueError(
                "Invalid value for argument `class_aggregation`. Expected "
                f"one of {valid_class_aggregation_values}. "
                f"Received: class_aggregation={class_aggregation}"
            )
        if num_regressors < 0:
            raise ValueError(
                "Invalid value for argument `num_regressors`. "
                "Expected a value >= 0. "
                f"Received: num_regressors={num_regressors}"
            )
        self.class_aggregation = class_aggregation
        self.num_regressors = num_regressors
        # Scalar count of all seen elements; used by the adjusted-R2 branch.
        self.num_samples = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            name="num_samples",
        )
        # Per-class state is created lazily in `_build()` once the number of
        # output classes is known from the first `update_state()` call.
        self._built = False
    def _build(self, y_true_shape, y_pred_shape):
        # Validate inputs and allocate the per-class accumulator variables.
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        num_classes = y_pred_shape[-1]
        # sum(w * y_true**2) per class.
        self.squared_sum = self.add_variable(
            name="squared_sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        # sum(w * y_true) per class.
        self.sum = self.add_variable(
            name="sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        # sum(w * (y_true - y_pred)**2) per class (residual sum of squares).
        self.total_mse = self.add_variable(
            name="residual",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        # sum(w) per class.
        self.count = self.add_variable(
            name="count",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self._built = True
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.
        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.
        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)
        y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
        if not self._built:
            self._build(y_true.shape, y_pred.shape)
        if sample_weight is None:
            sample_weight = 1
        sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)
        if len(sample_weight.shape) == 1:
            # Make sure there's a features dimension
            sample_weight = ops.expand_dims(sample_weight, axis=1)
        sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))
        weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype)
        self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))
        self.squared_sum.assign(
            self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0)
        )
        self.total_mse.assign(
            self.total_mse
            + ops.sum(
                (y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype),
                axis=0,
            )
        )
        self.count.assign(self.count + ops.sum(sample_weight, axis=0))
        self.num_samples.assign(self.num_samples + ops.size(y_true))
    def result(self):
        # Weighted per-class mean of y_true.
        mean = self.sum / self.count
        # Total sum of squares: sum(w*y**2) - sum(w*y) * mean.
        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.total_mse / total)
        # A zero-variance target makes `total` zero; map the resulting
        # infinity to a score of 0.
        raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores)
        if self.class_aggregation == "uniform_average":
            r2_score = ops.mean(raw_scores)
        elif self.class_aggregation == "variance_weighted_average":
            # Weight each class's score by its total sum of squares.
            weighted_sum = ops.sum(total * raw_scores)
            sum_of_weights = ops.sum(total)
            r2_score = weighted_sum / sum_of_weights
        else:
            r2_score = raw_scores
        if self.num_regressors != 0:
            # Adjusted R2 is only well defined when there are strictly more
            # data points than regressors plus one; otherwise fall back.
            if self.num_regressors > self.num_samples - 1:
                warnings.warn(
                    "More independent predictors than datapoints "
                    "in adjusted R2 score. Falling back to standard R2 score.",
                    stacklevel=2,
                )
            elif self.num_regressors == self.num_samples - 1:
                warnings.warn(
                    "Division by zero in Adjusted R2 score. "
                    "Falling back to standard R2 score.",
                    stacklevel=2,
                )
            else:
                # Adjusted R2 = 1 - (1 - R2) * (n - 1) / (n - p - 1).
                n = ops.convert_to_tensor(self.num_samples, dtype="float32")
                p = ops.convert_to_tensor(self.num_regressors, dtype="float32")
                num = ops.multiply(
                    ops.subtract(1.0, r2_score), ops.subtract(n, 1.0)
                )
                den = ops.subtract(ops.subtract(n, p), 1.0)
                r2_score = ops.subtract(1.0, ops.divide(num, den))
        return r2_score
    def reset_state(self):
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))
    def get_config(self):
        """Return the serializable config of the metric."""
        config = {
            "name": self.name,
            "dtype": self.dtype,
            "class_aggregation": self.class_aggregation,
            "num_regressors": self.num_regressors,
        }
        base_config = super().get_config()
        return {**base_config, **config}
def cosine_similarity(y_true, y_pred, axis=-1):
    """Computes the cosine similarity between labels and predictions.
    Formula:
    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```
    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity. Defaults to `-1`.
    Returns:
        Cosine similarity tensor.
    Example:
    >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
    >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
    [0., 0.99999994, -0.99999994]
    """
    # Cast the targets to the predictions' dtype, then align ranks.
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    # The dot product of the two unit-normalized tensors is the cosine.
    unit_true = normalize(y_true, axis=axis)
    unit_pred = normalize(y_pred, axis=axis)
    return ops.sum(unit_true * unit_pred, axis=axis)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/correlation_metrics_test.py | keras/src/metrics/correlation_metrics_test.py | import numpy as np
from scipy.stats import pearsonr
from keras.src import testing
from keras.src.metrics import ConcordanceCorrelation
from keras.src.metrics import PearsonCorrelation
from keras.src.metrics import correlation_metrics
class CorrelationsTest(testing.TestCase):
    """Tests for `PearsonCorrelation` and `ConcordanceCorrelation`."""
    def _get_data(self):
        # Sample data for testing
        y_true = np.array(
            [[0, 1, 0.5], [1, 1, 0.2], [1, 1, 0.1], [0.1, 0.7, 0.0]],
            dtype="float32",
        )
        y_pred = np.array(
            [[0.1, 0.9, 0.5], [1, 0.9, 0.2], [0.2, 0.8, 0], [0.3, 0.3, 0.9]],
            dtype="float32",
        )
        # Precomputed per-row concordance correlation reference values.
        ccc_expected = np.array(
            [0.97560976, 0.98765432, 0.46511628, -0.46376812]
        )
        # pcc_expected = np.array([1, 0.99339927, 0.69337525, -0.60999428])
        # Per-row Pearson references are taken from scipy instead of the
        # hard-coded values commented out above.
        pcc_expected = np.array(
            [pearsonr(yt, yp).statistic for yt, yp in zip(y_true, y_pred)]
        )
        return y_true, y_pred, ccc_expected, pcc_expected
    def test_pearson_function(self):
        """Test the functional API for Pearson Correlation Coefficient."""
        y_true, y_pred, _, pcc_expected = self._get_data()
        result = correlation_metrics.pearson_correlation(
            y_true, y_pred, axis=-1
        )
        self.assertAllClose(result, pcc_expected)
    def test_concordance_function(self):
        """Test the functional API for Concordance Correlation Coefficient."""
        y_true, y_pred, ccc_expected, _ = self._get_data()
        result = correlation_metrics.concordance_correlation(
            y_true, y_pred, axis=-1
        )
        self.assertAllClose(result, ccc_expected)
    def test_pearson_class(self):
        """Test the PearsonCorrelation metric class."""
        y_true, y_pred, _, pcc_expected = self._get_data()
        m = PearsonCorrelation(axis=-1, dtype="float32")
        # Feed the batch in two chunks to exercise stateful accumulation.
        m.update_state(y_true[:2], y_pred[:2])
        self.assertAllClose(m.result(), np.mean(pcc_expected[:2]))
        m.update_state(y_true[2:], y_pred[2:])
        self.assertAllClose(m.result(), np.mean(pcc_expected))
    def test_concordance_class(self):
        """Test the ConcordanceCorrelation metric class."""
        y_true, y_pred, ccc_expected, _ = self._get_data()
        m = ConcordanceCorrelation(axis=-1, dtype="float32")
        # Feed the batch in two chunks to exercise stateful accumulation.
        m.update_state(y_true[:2], y_pred[:2])
        self.assertAllClose(m.result(), np.mean(ccc_expected[:2]))
        m.update_state(y_true[2:], y_pred[2:])
        self.assertAllClose(m.result(), np.mean(ccc_expected))
    def test_pearson_config(self):
        """Test the get_config method for PearsonCorrelation."""
        m = PearsonCorrelation(axis=-1, dtype="float16")
        config = m.get_config()
        self.assertEqual(config["axis"], -1)
        self.assertEqual(config["dtype"], "float16")
        self.assertEqual(config["name"], "pearson_correlation")
    def test_concordance_config(self):
        """Test the get_config method for ConcordanceCorrelation."""
        m = ConcordanceCorrelation(axis=-1, dtype="float32")
        config = m.get_config()
        self.assertEqual(config["axis"], -1)
        self.assertEqual(config["dtype"], "float32")
        self.assertEqual(config["name"], "concordance_correlation")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/metrics_utils.py | keras/src/metrics/metrics_utils.py | from enum import Enum
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.utils.python_utils import to_list
# Large-magnitude negative constant used as a finite stand-in for -inf.
NEG_INF = -1e10
def assert_thresholds_range(thresholds):
    """Raise `ValueError` if any threshold is None or outside [0, 1]."""
    if thresholds is None:
        return
    out_of_range = []
    for t in thresholds:
        if t is None or t < 0 or t > 1:
            out_of_range.append(t)
    if out_of_range:
        raise ValueError(
            "Threshold values must be in [0, 1]. "
            f"Received: {out_of_range}"
        )
def parse_init_thresholds(thresholds, default_threshold=0.5):
    """Normalize constructor `thresholds` into a validated list.
    Falls back to `default_threshold` when `thresholds` is None; otherwise
    validates that every provided threshold lies in [0, 1].
    """
    if thresholds is None:
        return to_list(default_threshold)
    assert_thresholds_range(to_list(thresholds))
    return to_list(thresholds)
class ConfusionMatrix(Enum):
    """Keys identifying the four confusion-matrix entries.
    Used as dictionary keys in the `variables_to_update` mappings consumed
    by the confusion-matrix update helpers in this module.
    """
    TRUE_POSITIVES = "tp"
    FALSE_POSITIVES = "fp"
    TRUE_NEGATIVES = "tn"
    FALSE_NEGATIVES = "fn"
class AUCCurve(Enum):
    """Type of AUC curve (ROC, PR, or PR-Gain)."""
    ROC = "ROC"
    PR = "PR"
    PRGAIN = "PRGAIN"
    @staticmethod
    def from_str(key):
        """Map a case-tolerant string alias to the corresponding member."""
        for aliases, curve in (
            (("pr", "PR"), AUCCurve.PR),
            (("roc", "ROC"), AUCCurve.ROC),
            (("prgain", "PRGAIN"), AUCCurve.PRGAIN),
        ):
            if key in aliases:
                return curve
        raise ValueError(
            f'Invalid AUC curve value: "{key}". '
            'Expected values are ["PR", "ROC", "PRGAIN"]'
        )
class AUCSummationMethod(Enum):
    """Type of AUC summation method.
    See https://en.wikipedia.org/wiki/Riemann_sum
    Contains the following values:
    * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
      `PR` curve, interpolates (true/false) positives but not the ratio that is
      precision (see Davis & Goadrich 2006 for details).
    * 'minoring': Applies left summation for increasing intervals and right
      summation for decreasing intervals.
    * 'majoring': Applies right summation for increasing intervals and left
      summation for decreasing intervals.
    """
    INTERPOLATION = "interpolation"
    MAJORING = "majoring"
    MINORING = "minoring"
    @staticmethod
    def from_str(key):
        """Map a case-tolerant string alias to the corresponding member."""
        members = (
            AUCSummationMethod.INTERPOLATION,
            AUCSummationMethod.MAJORING,
            AUCSummationMethod.MINORING,
        )
        for method in members:
            # Accept both the lowercase value and its capitalized variant.
            if key in (method.value, method.value.capitalize()):
                return method
        raise ValueError(
            f'Invalid AUC summation method value: "{key}". '
            'Expected values are ["interpolation", "majoring", "minoring"]'
        )
def _update_confusion_matrix_variables_optimized(
    variables_to_update,
    y_true,
    y_pred,
    thresholds,
    multi_label=False,
    sample_weights=None,
    label_weights=None,
    thresholds_with_epsilon=False,
):
    """Update confusion matrix variables with memory efficient alternative.
    Note that the thresholds need to be evenly distributed within the list, eg,
    the diff between consecutive elements are the same.
    To compute TP/FP/TN/FN, we are measuring a binary classifier
      C(t) = (predictions >= t)
    at each threshold 't'. So we have
      TP(t) = sum( C(t) * true_labels )
      FP(t) = sum( C(t) * false_labels )
    But, computing C(t) requires computation for each t. To make it fast,
    observe that C(t) is a cumulative integral, and so if we have
      thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
    where n = num_thresholds, and if we can compute the bucket function
      B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
    then we get
      C(t_i) = sum( B(j), j >= i )
    which is the reversed cumulative sum in ops.cumsum().
    We can compute B(i) efficiently by taking advantage of the fact that
    our thresholds are evenly distributed, in that
      width = 1.0 / (num_thresholds - 1)
      thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
    Given a prediction value p, we can map it to its bucket by
      bucket_index(p) = floor( p * (num_thresholds - 1) )
    so we can use ops.segment_sum() to update the buckets in one pass.
    Consider following example:
    y_true = [0, 0, 1, 1]
    y_pred = [0.1, 0.5, 0.3, 0.9]
    thresholds = [0.0, 0.5, 1.0]
    num_buckets = 2   # [0.0, 1.0], (1.0, 2.0]
    bucket_index(y_pred) = ops.floor(y_pred * num_buckets)
                         = ops.floor([0.2, 1.0, 0.6, 1.8])
                         = [0, 0, 0, 1]
    # The meaning of this bucket is that if any of the label is true,
    # then 1 will be added to the corresponding bucket with the index.
    # Eg, if the label for 0.2 is true, then 1 will be added to bucket 0. If the
    # label for 1.8 is true, then 1 will be added to bucket 1.
    #
    # Note the second item "1.0" is floored to 0, since the value need to be
    # strictly larger than the bucket lower bound.
    # In the implementation, we use ops.ceil() - 1 to achieve this.
    tp_bucket_value = ops.segment_sum(true_labels, bucket_indices,
                                      num_segments=num_thresholds)
                    = [1, 1, 0]
    # For [1, 1, 0] here, it means there is 1 true value contributed by bucket
    # 0, and 1 value contributed by bucket 1. When we aggregate them to
    # together, the result become [a + b + c, b + c, c], since large thresholds
    # will always contribute to the value for smaller thresholds.
    true_positive = ops.cumsum(tp_bucket_value, reverse=True)
                  = [2, 1, 0]
    This implementation exhibits a run time and space complexity of O(T + N),
    where T is the number of thresholds and N is the size of predictions.
    Metrics that rely on standard implementation instead exhibit a complexity of
    O(T * N).
    Args:
        variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid
            keys and corresponding variables to update as values.
        y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be
            cast to `bool`.
        y_pred: A floating point `Tensor` of arbitrary shape and whose values
            are in the range `[0, 1]`.
        thresholds: A sorted floating point `Tensor` with value in `[0, 1]`.
            It need to be evenly distributed (the diff between each element need
            to be the same).
        multi_label: Optional boolean indicating whether multidimensional
            prediction/labels should be treated as multilabel responses, or
            flattened into a single label. When True, the values of
            `variables_to_update` must have a second dimension equal to the
            number of labels in y_true and y_pred, and those tensors must not be
            RaggedTensors.
        sample_weights: Optional `Tensor` whose rank is either 0, or the same
            rank as `y_true`, and must be broadcastable to `y_true` (i.e., all
            dimensions must be either `1`, or the same as the corresponding
            `y_true` dimension).
        label_weights: Optional tensor of non-negative weights for multilabel
            data. The weights are applied when calculating TP, FP, FN, and TN
            without explicit multilabel handling (i.e. when the data is to be
            flattened).
        thresholds_with_epsilon: Optional boolean indicating whether the leading
            and tailing thresholds has any epsilon added for floating point
            imprecisions. It will change how we handle the leading and tailing
            bucket.
    """
    num_thresholds = ops.shape(thresholds)[0]
    if sample_weights is None:
        sample_weights = 1.0
    else:
        sample_weights = ops.broadcast_to(
            ops.cast(sample_weights, dtype=y_pred.dtype), ops.shape(y_pred)
        )
        if not multi_label:
            sample_weights = ops.reshape(sample_weights, [-1])
    if label_weights is None:
        label_weights = 1.0
    else:
        label_weights = ops.expand_dims(label_weights, 0)
        label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
        if not multi_label:
            label_weights = ops.reshape(label_weights, [-1])
    # Combined per-element weight (sample weight x label weight).
    weights = ops.cast(
        ops.multiply(sample_weights, label_weights), y_true.dtype
    )
    # We shouldn't need this, but in case there are predict value that is out of
    # the range of [0.0, 1.0]
    y_pred = ops.clip(y_pred, x_min=0.0, x_max=1.0)
    y_true = ops.cast(ops.cast(y_true, "bool"), y_true.dtype)
    if not multi_label:
        y_true = ops.reshape(y_true, [-1])
        y_pred = ops.reshape(y_pred, [-1])
    # Split the weight mass into positive (true) and negative (false) parts;
    # y_true is 0/1 after the bool round trip above.
    true_labels = ops.multiply(y_true, weights)
    false_labels = ops.multiply((1.0 - y_true), weights)
    # Compute the bucket indices for each prediction value.
    # Since the predict value has to be strictly greater than the thresholds,
    # eg, buckets like [0, 0.5], (0.5, 1], and 0.5 belongs to first bucket.
    # We have to use math.ceil(val) - 1 for the bucket.
    bucket_indices = (
        ops.ceil(y_pred * (ops.cast(num_thresholds, dtype=y_pred.dtype) - 1))
        - 1
    )
    if thresholds_with_epsilon:
        # In this case, the first bucket should actually take into account since
        # the any prediction between [0.0, 1.0] should be larger than the first
        # threshold. We change the bucket value from -1 to 0.
        bucket_indices = ops.relu(bucket_indices)
    bucket_indices = ops.cast(bucket_indices, "int32")
    if multi_label:
        # We need to run bucket segment sum for each of the label class. In the
        # multi_label case, the rank of the label is 2. We first transpose it so
        # that the label dim becomes the first and we can parallel run though
        # them.
        true_labels = ops.transpose(true_labels)
        false_labels = ops.transpose(false_labels)
        bucket_indices = ops.transpose(bucket_indices)
        def gather_bucket(label_and_bucket_index):
            # Sums the weight mass per threshold bucket for one label class.
            label, bucket_index = (
                label_and_bucket_index[0],
                label_and_bucket_index[1],
            )
            return ops.segment_sum(
                data=label,
                segment_ids=bucket_index,
                num_segments=num_thresholds,
            )
        tp_bucket_v = backend.vectorized_map(
            gather_bucket,
            (true_labels, bucket_indices),
        )
        fp_bucket_v = backend.vectorized_map(
            gather_bucket, (false_labels, bucket_indices)
        )
        # flip-cumsum-flip implements the reversed cumulative sum described
        # in the docstring (C(t_i) = sum of buckets j >= i).
        tp = ops.transpose(ops.flip(ops.cumsum(ops.flip(tp_bucket_v), axis=1)))
        fp = ops.transpose(ops.flip(ops.cumsum(ops.flip(fp_bucket_v), axis=1)))
    else:
        tp_bucket_v = ops.segment_sum(
            data=true_labels,
            segment_ids=bucket_indices,
            num_segments=num_thresholds,
        )
        fp_bucket_v = ops.segment_sum(
            data=false_labels,
            segment_ids=bucket_indices,
            num_segments=num_thresholds,
        )
        tp = ops.flip(ops.cumsum(ops.flip(tp_bucket_v)))
        fp = ops.flip(ops.cumsum(ops.flip(fp_bucket_v)))
    # fn = sum(true_labels) - tp
    # tn = sum(false_labels) - fp
    if (
        ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
        or ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
    ):
        if multi_label:
            total_true_labels = ops.sum(true_labels, axis=1)
            total_false_labels = ops.sum(false_labels, axis=1)
        else:
            total_true_labels = ops.sum(true_labels)
            total_false_labels = ops.sum(false_labels)
    if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
        variable.assign(variable + tp)
    if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
        variable.assign(variable + fp)
    if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
        tn = total_false_labels - fp
        variable.assign(variable + tn)
    if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
        fn = total_true_labels - tp
        variable.assign(variable + fn)
def is_evenly_distributed_thresholds(thresholds):
    """Check whether `thresholds` forms an evenly spaced grid over [0, 1].

    Evenly distributed thresholds allow metrics such as AUC to use a
    memory-efficient bucketing strategy instead of evaluating every
    threshold individually.

    Args:
        thresholds: A python list or tuple, or 1D numpy array whose values
            are ranged in [0, 1].

    Returns:
        boolean, whether the values in the input are evenly distributed.
    """
    count = len(thresholds)
    # Fewer than 3 thresholds never qualifies for the optimized path.
    if count < 3:
        return False
    # Reference grid: i / (count - 1) for i in [0, count).
    reference = np.arange(count, dtype=np.float32) / (count - 1)
    return np.allclose(thresholds, reference, atol=backend.epsilon())
def update_confusion_matrix_variables(
    variables_to_update,
    y_true,
    y_pred,
    thresholds,
    top_k=None,
    class_id=None,
    sample_weight=None,
    multi_label=False,
    label_weights=None,
    thresholds_distributed_evenly=False,
):
    """Updates the given confusion matrix variables.

    For every pair of values in y_true and y_pred:

    true_positive: y_true == True and y_pred > thresholds
    false_negatives: y_true == True and y_pred <= thresholds
    true_negatives: y_true == False and y_pred <= thresholds
    false_positive: y_true == False and y_pred > thresholds

    The results will be weighted and added together. When multiple thresholds
    are provided, we will repeat the same for every threshold.

    For estimation of these metrics over a stream of data, the function creates
    an `update_op` operation that updates the given variables.

    If `sample_weight` is `None`, weights default to 1.
    Use weights of 0 to mask values.

    Args:
        variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid
            keys and corresponding variables to update as values.
        y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to
            `bool`.
        y_pred: A floating point `Tensor` of arbitrary shape and whose values
            are in the range `[0, 1]`.
        thresholds: A float value, float tensor, python list, or tuple of
            float thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
        top_k: Optional int, indicates that the positive labels should be
            limited to the top k predictions.
        class_id: Optional int, limits the prediction and labels to the class
            specified by this argument.
        sample_weight: Optional `Tensor` whose rank is either 0, or the same
            rank as `y_true`, and must be broadcastable to `y_true` (i.e., all
            dimensions must be either `1`, or the same as the corresponding
            `y_true` dimension).
        multi_label: Optional boolean indicating whether multidimensional
            prediction/labels should be treated as multilabel responses, or
            flattened into a single label. When True, the values of
            `variables_to_update` must have a second dimension equal to the
            number of labels in y_true and y_pred, and those tensors must not
            be RaggedTensors.
        label_weights: (optional) tensor of non-negative weights for
            multilabel data. The weights are applied when calculating TP, FP,
            FN, and TN without explicit multilabel handling (i.e. when the
            data is to be flattened).
        thresholds_distributed_evenly: Boolean, whether the thresholds are
            evenly distributed within the list. An optimized method will be
            used if this is the case. See
            _update_confusion_matrix_variables_optimized() for more details.

    Raises:
        ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
            `sample_weight` is not `None` and its shape doesn't match
            `y_pred`, or if `variables_to_update` contains invalid keys.
    """
    if multi_label and label_weights is not None:
        raise ValueError(
            "`label_weights` for multilabel data should be handled "
            "outside of `update_confusion_matrix_variables` when "
            "`multi_label` is True."
        )
    if variables_to_update is None:
        return
    if not any(
        key for key in variables_to_update if key in list(ConfusionMatrix)
    ):
        raise ValueError(
            "Please provide at least one valid confusion matrix "
            "variable to update. Valid variable key options are: "
            f'"{list(ConfusionMatrix)}". '
            f'Received: "{variables_to_update.keys()}"'
        )
    # All math below runs in the dtype of the variables being updated.
    variable_dtype = list(variables_to_update.values())[0].dtype
    y_true = ops.cast(y_true, dtype=variable_dtype)
    y_pred = ops.cast(y_pred, dtype=variable_dtype)
    if thresholds_distributed_evenly:
        # Check whether the thresholds has any leading or tailing epsilon added
        # for floating point imprecision. The leading and tailing threshold will
        # be handled bit differently as the corner case. At this point,
        # thresholds should be a list/array with more than 2 items, and ranged
        # between [0, 1]. See is_evenly_distributed_thresholds() for more
        # details.
        thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
    thresholds = ops.convert_to_tensor(thresholds, dtype=variable_dtype)
    num_thresholds = ops.shape(thresholds)[0]
    if multi_label:
        # `one_thresh` is True when a single rank-1 threshold list is shared
        # across all labels; a rank-2 `thresholds` carries one threshold
        # list per label.
        one_thresh = ops.equal(
            np.array(1, dtype="int32"),
            len(thresholds.shape),
        )
    else:
        one_thresh = np.array(True, dtype="bool")
    invalid_keys = [
        key for key in variables_to_update if key not in list(ConfusionMatrix)
    ]
    if invalid_keys:
        raise ValueError(
            f'Invalid keys: "{invalid_keys}". '
            f'Valid variable key options are: "{list(ConfusionMatrix)}"'
        )
    y_pred, y_true = squeeze_or_expand_to_same_rank(y_pred, y_true)
    if sample_weight is not None:
        sample_weight = ops.expand_dims(
            ops.cast(sample_weight, dtype=variable_dtype), axis=-1
        )
        _, sample_weight = squeeze_or_expand_to_same_rank(
            y_true, sample_weight, expand_rank_1=False
        )
    if top_k is not None:
        # Keep only the top-k predictions as positives (others -> NEG_INF).
        y_pred = _filter_top_k(y_pred, top_k)
    if class_id is not None:
        if len(y_pred.shape) == 1:
            raise ValueError(
                "When class_id is provided, y_pred must be a 2D array "
                "with shape (num_samples, num_classes), found shape: "
                f"{y_pred.shape}"
            )
        # Preserve dimension to match with sample_weight
        y_true = y_true[..., class_id, None]
        y_pred = y_pred[..., class_id, None]
    if thresholds_distributed_evenly:
        # Delegate to the memory-optimized bucketing implementation.
        return _update_confusion_matrix_variables_optimized(
            variables_to_update,
            y_true,
            y_pred,
            thresholds,
            multi_label=multi_label,
            sample_weights=sample_weight,
            label_weights=label_weights,
            thresholds_with_epsilon=thresholds_with_epsilon,
        )
    if None in y_pred.shape:
        # Dynamic-shape path: some dimensions are unknown at trace time, so
        # sizes must be computed with backend ops.
        pred_shape = ops.shape(y_pred)
        num_predictions = pred_shape[0]
        if len(y_pred.shape) == 1:
            num_labels = 1
        else:
            num_labels = ops.cast(
                ops.prod(ops.array(pred_shape[1:]), axis=0), "int32"
            )
        thresh_label_tile = ops.where(one_thresh, num_labels, 1)
    else:
        # Fully static shapes: compute sizes eagerly with numpy.
        pred_shape = ops.shape(y_pred)
        num_predictions = pred_shape[0]
        if len(y_pred.shape) == 1:
            num_labels = 1
        else:
            num_labels = np.prod(pred_shape[1:], axis=0).astype("int32")
        thresh_label_tile = np.where(one_thresh, num_labels, 1)
    # Reshape predictions and labels, adding a dim for thresholding.
    if multi_label:
        predictions_extra_dim = ops.expand_dims(y_pred, 0)
        labels_extra_dim = ops.expand_dims(ops.cast(y_true, dtype="bool"), 0)
    else:
        # Flatten predictions and labels when not multilabel.
        predictions_extra_dim = ops.reshape(y_pred, [1, -1])
        labels_extra_dim = ops.reshape(ops.cast(y_true, dtype="bool"), [1, -1])
    # Tile the thresholds for every prediction.
    if multi_label:
        thresh_pretile_shape = [num_thresholds, 1, -1]
        thresh_tiles = [1, num_predictions, thresh_label_tile]
        data_tiles = [num_thresholds, 1, 1]
    else:
        thresh_pretile_shape = [num_thresholds, -1]
        thresh_tiles = [1, num_predictions * num_labels]
        data_tiles = [num_thresholds, 1]
    thresh_tiled = ops.tile(
        ops.reshape(thresholds, thresh_pretile_shape), thresh_tiles
    )
    # Tile the predictions for every threshold.
    preds_tiled = ops.tile(predictions_extra_dim, data_tiles)
    # Compare predictions and threshold.
    pred_is_pos = ops.greater(preds_tiled, thresh_tiled)
    # Tile labels by number of thresholds
    label_is_pos = ops.tile(labels_extra_dim, data_tiles)
    if sample_weight is not None:
        sample_weight = ops.broadcast_to(
            ops.cast(sample_weight, dtype=y_pred.dtype), ops.shape(y_pred)
        )
        weights_tiled = ops.tile(
            ops.reshape(sample_weight, thresh_tiles), data_tiles
        )
    else:
        weights_tiled = None
    if label_weights is not None and not multi_label:
        label_weights = ops.expand_dims(label_weights, 0)
        label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
        label_weights_tiled = ops.tile(
            ops.reshape(label_weights, thresh_tiles), data_tiles
        )
        if weights_tiled is None:
            weights_tiled = label_weights_tiled
        else:
            # Combine per-sample and per-label weights multiplicatively.
            weights_tiled = ops.multiply(weights_tiled, label_weights_tiled)

    def weighted_assign_add(label, pred, weights, var):
        """Adds the (optionally weighted) count of `label AND pred` per
        threshold into `var`."""
        label_and_pred = ops.cast(ops.logical_and(label, pred), dtype=var.dtype)
        if weights is not None:
            label_and_pred *= ops.cast(weights, dtype=var.dtype)
        var.assign(var + ops.sum(label_and_pred, 1))

    # Each confusion-matrix entry is defined by a (label mask, prediction
    # mask) pair; negated masks are materialized only when actually needed.
    loop_vars = {
        ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
    }
    update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
    update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
    update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
    if update_fn or update_tn:
        pred_is_neg = ops.logical_not(pred_is_pos)
        loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
    if update_fp or update_tn:
        label_is_neg = ops.logical_not(label_is_pos)
        loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
    if update_tn:
        loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (
            label_is_neg,
            pred_is_neg,
        )
    for matrix_cond, (label, pred) in loop_vars.items():
        if matrix_cond in variables_to_update:
            weighted_assign_add(
                label, pred, weights_tiled, variables_to_update[matrix_cond]
            )
def _filter_top_k(x, k):
    """Keep the top-k values along the last dim of x; push the rest to NEG_INF.

    Used for computing top-k prediction values in dense labels (which has the
    same shape as predictions) for recall and precision top-k metrics.

    Args:
        x: tensor with any dimensions.
        k: the number of values to keep.

    Returns:
        tensor with same shape and dtype as x.
    """
    _, keep_indices = ops.top_k(x, k)
    # One-hot each of the k indices over the last axis, then collapse the k
    # one-hot vectors into a single 0/1 keep-mask per row.
    keep_mask = ops.sum(
        ops.one_hot(keep_indices, ops.shape(x)[-1], axis=-1), axis=-2
    )
    # Kept entries pass through unchanged; dropped entries become NEG_INF.
    return x * keep_mask + NEG_INF * (1 - keep_mask)
def confusion_matrix(
    labels,
    predictions,
    num_classes,
    weights=None,
    dtype="int32",
):
    """Computes the confusion matrix from predictions and labels.

    The matrix columns represent the prediction labels and the rows represent
    the real labels. The confusion matrix is always a 2-D array of shape
    `(n, n)`, where `n` is the number of valid labels for a given
    classification task. Both prediction and labels must be 1-D arrays of the
    same shape in order for this function to work.

    Class labels are expected to start at 0. For example, if `num_classes` is
    3, then the possible labels would be `[0, 1, 2]`. Note that `num_classes`
    is required here: this helper does not infer it from the data.

    If `weights` is not `None`, then each prediction contributes its
    corresponding weight to the total value of the confusion matrix cell.

    For example:

    ```python
    keras.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
        [[0 0 0 0 0]
         [0 0 1 0 0]
         [0 0 1 0 0]
         [0 0 0 0 0]
         [0 0 0 0 1]]
    ```

    Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
    resulting in a 5x5 confusion matrix.

    Args:
        labels: 1-D tensor of real labels for the classification task.
        predictions: 1-D tensor of predictions for a given classification.
        num_classes: The possible number of labels the classification
            task can have.
        weights: An optional tensor whose shape matches `predictions`.
        dtype: Data type of the confusion matrix.

    Returns:
        A tensor of type `dtype` with shape `(n, n)` representing the
        confusion matrix, where `n` is the number of possible labels in the
        classification task.
    """
    labels = ops.convert_to_tensor(labels, dtype)
    predictions = ops.convert_to_tensor(predictions, dtype)
    labels, predictions = squeeze_or_expand_to_same_rank(labels, predictions)
    predictions = ops.cast(predictions, dtype)
    labels = ops.cast(labels, dtype)
    # Each sample contributes 1 (or its weight) to one matrix cell.
    if weights is None:
        cell_values = ops.ones_like(predictions, dtype)
    else:
        cell_values = ops.convert_to_tensor(weights, dtype)
    cell_values = ops.cast(cell_values, dtype=dtype)
    # (row, col) = (true label, predicted label) for every sample.
    cell_indices = ops.cast(
        ops.stack([labels, predictions], axis=1), dtype="int64"
    )
    side = int(num_classes)
    return ops.scatter(cell_indices, cell_values, (side, side))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/correlation_metrics.py | keras/src/metrics/correlation_metrics.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.metrics import reduction_metrics
@keras_export("keras.metrics.pearson_correlation")
def pearson_correlation(y_true, y_pred, axis=-1):
"""Computes the Pearson coefficient between labels and predictions.
Formula:
```python
loss = mean(l2norm(y_true - mean(y_true) * l2norm(y_pred - mean(y_pred)))
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity. Defaults to `-1`.
Returns:
Pearson Correlation Coefficient tensor.
Example:
>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]
>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]
>>> loss = keras.losses.concordance_correlation(
... y_true, y_pred, axis=-1
... ).numpy()
[1. 0.99339927]
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
y_true_norm = y_true - ops.mean(y_true, axis=axis, keepdims=True)
y_pred_norm = y_pred - ops.mean(y_pred, axis=axis, keepdims=True)
y_true_norm = y_true_norm / ops.std(y_true_norm, axis=axis, keepdims=True)
y_pred_norm = y_pred_norm / ops.std(y_pred_norm, axis=axis, keepdims=True)
return ops.mean(y_true_norm * y_pred_norm, axis=axis)
@keras_export("keras.metrics.concordance_correlation")
def concordance_correlation(y_true, y_pred, axis=-1):
"""Computes the Concordance coefficient between labels and predictions.
Formula:
```python
loss = mean(
2 * (y_true - mean(y_true) * (y_pred - mean(y_pred)) / (
var(y_true) + var(y_pred) + square(mean(y_true) - mean(y_pred))
)
)
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity. Defaults to `-1`.
Returns:
Concordance Correlation Coefficient tensor.
Example:
>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]
>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]
>>> loss = keras.losses.concordance_correlation(
... y_true, y_pred, axis=-1
... ).numpy()
[0.97560976 0.98765432]
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
y_true_mean = ops.mean(y_true, axis=axis, keepdims=True)
y_pred_mean = ops.mean(y_pred, axis=axis, keepdims=True)
y_true_var = ops.var(y_true - y_true_mean, axis=axis, keepdims=True)
y_pred_var = ops.var(y_pred - y_pred_mean, axis=axis, keepdims=True)
covar = (y_true - y_pred_mean) * (y_pred - y_pred_mean)
norm = y_true_var + y_pred_var + ops.square(y_true_mean - y_pred_mean)
return ops.mean(2 * covar / (norm + backend.epsilon()), axis=axis)
@keras_export("keras.metrics.PearsonCorrelation")
class PearsonCorrelation(reduction_metrics.MeanMetricWrapper):
"""Calculates the Pearson Correlation Coefficient (PCC).
PCC measures the linear relationship between the true values (`y_true`) and
the predicted values (`y_pred`). The coefficient ranges from -1 to 1, where
a value of 1 implies a perfect positive linear correlation, 0 indicates no
linear correlation, and -1 indicates a perfect negative linear correlation.
This metric is widely used in regression tasks where the strength of the
linear relationship between predictions and true labels is an
important evaluation criterion.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) integer or tuple of integers of the axis/axes along
which to compute the metric. Defaults to `-1`.
Example:
>>> pcc = keras.metrics.PearsonCorrelation(axis=-1)
>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]
>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]
>>> pcc.update_state(y_true, y_pred)
>>> pcc.result()
0.9966996338993913
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mean_squared_error',
metrics=[keras.metrics.PearsonCorrelation()])
```
"""
def __init__(
self,
name="pearson_correlation",
dtype=None,
axis=-1,
):
super().__init__(
fn=pearson_correlation,
name=name,
dtype=dtype,
axis=axis,
)
self.axis = axis
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"axis": self.axis,
}
@keras_export("keras.metrics.ConcordanceCorrelation")
class ConcordanceCorrelation(reduction_metrics.MeanMetricWrapper):
"""Calculates the Concordance Correlation Coefficient (CCC).
CCC evaluates the agreement between true values (`y_true`) and predicted
values (`y_pred`) by considering both precision and accuracy. The
coefficient ranges from -1 to 1, where a value of 1 indicates perfect
agreement.
This metric is useful in regression tasks where it is important to assess
how well the predictions match the true values, taking into account both
their correlation and proximity to the 45-degree line of perfect
concordance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) integer or tuple of integers of the axis/axes along
which to compute the metric. Defaults to `-1`.
Example:
>>> ccc = keras.metrics.ConcordanceCorrelation(axis=-1)
>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]
>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]
>>> ccc.update_state(y_true, y_pred)
>>> ccc.result()
0.9816320385426076
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mean_squared_error',
metrics=[keras.metrics.ConcordanceCorrelation()])
```
"""
def __init__(
self,
name="concordance_correlation",
dtype=None,
axis=-1,
):
super().__init__(
fn=concordance_correlation,
name=name,
dtype=dtype,
axis=axis,
)
self.axis = axis
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"axis": self.axis,
}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/metrics/hinge_metrics_test.py | keras/src/metrics/hinge_metrics_test.py | import numpy as np
from keras.src import testing
from keras.src.metrics import hinge_metrics
class HingeTest(testing.TestCase):
    """Tests for the `Hinge` metric wrapper."""

    def test_config(self):
        metric = hinge_metrics.Hinge(name="hinge", dtype="int32")
        self.assertEqual(metric.name, "hinge")
        self.assertEqual(metric._dtype, "int32")
        # Round-trip through get_config/from_config to verify serialization.
        restored = hinge_metrics.Hinge.from_config(metric.get_config())
        self.assertEqual(restored.name, "hinge")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        metric = hinge_metrics.Hinge()
        targets = np.array([[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
        outputs = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
        metric.update_state(targets, outputs)
        self.assertAllClose(0.506, metric.result(), atol=1e-3)

    def test_weighted(self):
        metric = hinge_metrics.Hinge()
        targets = np.array([[-1, 1, -1, 1], [-1, -1, 1, 1]])
        outputs = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
        weights = np.array([1.5, 2.0])
        weighted_result = metric(targets, outputs, sample_weight=weights)
        self.assertAllClose(0.493, weighted_result, atol=1e-3)
class SquaredHingeTest(testing.TestCase):
    """Tests for the `SquaredHinge` metric wrapper."""

    def test_config(self):
        metric = hinge_metrics.SquaredHinge(
            name="squared_hinge", dtype="int32"
        )
        self.assertEqual(metric.name, "squared_hinge")
        self.assertEqual(metric._dtype, "int32")
        # Round-trip through get_config/from_config to verify serialization.
        restored = hinge_metrics.SquaredHinge.from_config(
            metric.get_config()
        )
        self.assertEqual(restored.name, "squared_hinge")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        metric = hinge_metrics.SquaredHinge()
        targets = np.array([[0, 1, 0, 1], [0, 0, 1, 1]], dtype="float32")
        outputs = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
        metric.update_state(targets, outputs)
        self.assertAllClose(0.364, metric.result(), atol=1e-3)

    def test_weighted(self):
        metric = hinge_metrics.SquaredHinge()
        targets = np.array([[-1, 1, -1, 1], [-1, -1, 1, 1]], dtype="float32")
        outputs = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
        weights = np.array([1.5, 2.0])
        weighted_result = metric(targets, outputs, sample_weight=weights)
        self.assertAllClose(0.347, weighted_result, atol=1e-3)
class CategoricalHingeTest(testing.TestCase):
    """Tests for the `CategoricalHinge` metric wrapper."""

    @staticmethod
    def _fixture():
        # Shared (y_true, y_pred) pair used by both result tests.
        y_true = np.array(
            (
                (0, 1, 0, 1, 0),
                (0, 0, 1, 1, 1),
                (1, 1, 1, 1, 0),
                (0, 0, 0, 0, 1),
            ),
            dtype="float32",
        )
        y_pred = np.array(
            (
                (0, 0, 1, 1, 0),
                (1, 1, 1, 1, 1),
                (0, 1, 0, 1, 0),
                (1, 1, 1, 1, 1),
            ),
            dtype="float32",
        )
        return y_true, y_pred

    def test_config(self):
        metric = hinge_metrics.CategoricalHinge(
            name="cat_hinge", dtype="int32"
        )
        self.assertEqual(metric.name, "cat_hinge")
        self.assertEqual(metric._dtype, "int32")
        # Round-trip through get_config/from_config to verify serialization.
        restored = hinge_metrics.CategoricalHinge.from_config(
            metric.get_config()
        )
        self.assertEqual(restored.name, "cat_hinge")
        self.assertEqual(len(restored.variables), 2)
        self.assertEqual(restored._dtype, "int32")

    def test_unweighted(self):
        metric = hinge_metrics.CategoricalHinge()
        y_true, y_pred = self._fixture()
        metric.update_state(y_true, y_pred)
        self.assertAllClose(0.5, metric.result(), atol=1e-5)

    def test_weighted(self):
        metric = hinge_metrics.CategoricalHinge()
        y_true, y_pred = self._fixture()
        weights = np.array((1.0, 1.5, 2.0, 2.5))
        weighted_result = metric(y_true, y_pred, sample_weight=weights)
        self.assertAllClose(0.5, weighted_result, atol=1e-5)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/compile_utils_test.py | keras/src/trainers/compile_utils_test.py | from collections import namedtuple
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import metrics as losses_module
from keras.src import metrics as metrics_module
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.trainers.compile_utils import CompileLoss
from keras.src.trainers.compile_utils import CompileMetrics
class TestCompileMetrics(testing.TestCase):
def test_single_output_case(self):
compile_metrics = CompileMetrics(
metrics=[metrics_module.MeanSquaredError()],
weighted_metrics=[metrics_module.MeanSquaredError()],
)
# Test symbolic build
y_true = backend.KerasTensor((3, 4))
y_pred = backend.KerasTensor((3, 4))
compile_metrics.build(y_true, y_pred)
# Test eager build
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]])
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
self.assertAllClose(result["mean_squared_error"], 0.055833336)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
self.assertAllClose(result["mean_squared_error"], 0.0)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0)
def test_list_output_case(self):
compile_metrics = CompileMetrics(
metrics=[
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
],
weighted_metrics=[
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
],
)
# Test symbolic build
y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
compile_metrics.build(y_true, y_pred)
self.assertEqual(len(compile_metrics.metrics), 8)
# Test eager build
y_true = [
np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
]
y_pred = [
np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
]
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
self.assertEqual(len(compile_metrics.metrics), 8)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = [
np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
]
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["mean_squared_error"], 0.055833336)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["mean_squared_error"], 0.0)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0)
def test_dict_output_case(self):
compile_metrics = CompileMetrics(
metrics={
"output_1": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
},
weighted_metrics={
"output_1": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
},
)
# Test symbolic build
y_true = {
"output_1": backend.KerasTensor((3, 4)),
"output_2": backend.KerasTensor((3, 4)),
}
y_pred = {
"output_1": backend.KerasTensor((3, 4)),
"output_2": backend.KerasTensor((3, 4)),
}
compile_metrics.build(y_true, y_pred)
# Test eager build
y_true = {
"output_1": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
"output_2": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
}
y_pred = {
"output_1": np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
"output_2": np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
}
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = {
"output_1": np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
"output_2": np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
}
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
# Result values obtained from `tf.keras`
# m = tf.keras.metrics.MeanSquaredError()
# m.update_state(y_true, y_pred1, sample_weight=weight)
# m.update_state(y_true, y_pred2, sample_weight=weight)
# m.result().numpy()
self.assertAllClose(result["output_1_mean_squared_error"], 0.055833336)
self.assertAllClose(result["output_2_mean_squared_error"], 0.055833336)
self.assertAllClose(result["output_1_mse"], 0.055833336)
self.assertAllClose(result["output_2_mse"], 0.055833336)
self.assertAllClose(
result["output_1_weighted_mean_squared_error"], 0.0725
)
self.assertAllClose(
result["output_2_weighted_mean_squared_error"], 0.0725
)
self.assertAllClose(result["output_1_weighted_mse"], 0.0725)
self.assertAllClose(result["output_2_weighted_mse"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["output_1_mean_squared_error"], 0.0)
self.assertAllClose(result["output_2_mean_squared_error"], 0.0)
self.assertAllClose(result["output_1_weighted_mean_squared_error"], 0.0)
self.assertAllClose(result["output_2_weighted_mean_squared_error"], 0.0)
def test_name_conversions(self):
compile_metrics = CompileMetrics(
metrics=["acc", "accuracy", "mse"],
weighted_metrics=[],
)
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
compile_metrics.build(y_true, y_pred)
compile_metrics.update_state(y_true, y_pred, sample_weight=None)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 3)
self.assertAllClose(result["acc"], 0.333333)
self.assertAllClose(result["accuracy"], 0.333333)
self.assertTrue("mse" in result)
def test_custom_metric_function(self):
def my_custom_metric(y_true, y_pred):
return ops.mean(ops.square(y_true - y_pred), axis=-1)
compile_metrics = CompileMetrics(
metrics=[my_custom_metric],
weighted_metrics=[],
)
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
compile_metrics.build(y_true, y_pred)
compile_metrics.update_state(y_true, y_pred, sample_weight=None)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 1)
self.assertTrue("my_custom_metric" in result)
    def test_dict_outputs_ignore_mismatched_output_names(self):
        """Tests that when output_names does not match dict keys, the correct
        keys are used.

        The user-facing dict keys take precedence over the model's
        internal op names when naming metric results.
        """
        # output_names represent internal op names that do not match dict keys.
        compile_metrics = CompileMetrics(
            metrics={
                "a": metrics_module.MeanSquaredError(),
                "b": metrics_module.MeanSquaredError(),
            },
            weighted_metrics=None,
            output_names=["dense", "dense_1"],
        )
        # Symbolic build with dict outputs keyed by user-facing names.
        y_true = {
            "a": backend.KerasTensor((3, 2)),
            "b": backend.KerasTensor((3, 2)),
        }
        y_pred = {
            "a": backend.KerasTensor((3, 2)),
            "b": backend.KerasTensor((3, 2)),
        }
        # The build method should correctly map metrics for outputs 'a' and 'b',
        # even when the op names do not match.
        compile_metrics.build(y_true, y_pred)
        # Make the two outputs produce different MSEs to verify mapping.
        y_true = {
            "a": np.zeros((3, 2), dtype="float32"),
            "b": np.zeros((3, 2), dtype="float32"),
        }
        y_pred = {
            # MSE(a) = 0.0
            "a": np.zeros((3, 2), dtype="float32"),
            # MSE(b) = 1.0
            "b": np.ones((3, 2), dtype="float32"),
        }
        compile_metrics.update_state(y_true, y_pred)
        result = compile_metrics.result()
        self.assertIsInstance(result, dict)
        # Should expose metrics under the dict keys ('a', 'b'),
        # and not the internal names.
        self.assertIn("a_mean_squared_error", result)
        self.assertIn("b_mean_squared_error", result)
        self.assertAllClose(result["a_mean_squared_error"], 0.0)
        self.assertAllClose(result["b_mean_squared_error"], 1.0, atol=1e-6)
class TestCompileLoss(testing.TestCase):
    def test_single_output_case(self):
        """A single loss object applies directly to a single output."""
        compile_loss = CompileLoss(
            loss=losses_module.MeanSquaredError(),
        )
        # Test symbolic build
        y_true = backend.KerasTensor((3, 4))
        y_pred = backend.KerasTensor((3, 4))
        compile_loss.build(y_true, y_pred)
        # Test eager build
        y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
        y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
        compile_loss.build(y_true, y_pred)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 0.068333, atol=1e-5)
    def test_single_output_case_with_crossentropy_loss(self):
        """The "crossentropy" alias is resolved from the output shapes."""
        compile_loss = CompileLoss(loss="crossentropy")
        # Test symbolic build
        y_true = backend.KerasTensor((3, 4))
        y_pred = backend.KerasTensor((3, 4))
        compile_loss.build(y_true, y_pred)
        # Test eager build
        y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
        y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
        compile_loss.build(y_true, y_pred)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 0.706595, atol=1e-5)
    @parameterized.parameters(True, False)
    def test_list_output_case(self, broadcast):
        """List outputs: one loss broadcast, or one loss per output."""
        if broadcast:
            # Test broadcasting single loss to all outputs
            compile_loss = CompileLoss(
                loss="mse",
            )
        else:
            compile_loss = CompileLoss(
                loss=["mse", "mse"],
            )
        # Test symbolic build
        y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
        y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
        compile_loss.build(y_true, y_pred)
        # Test eager build
        y_true = [
            np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
            np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        ]
        y_pred = [
            np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
            np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        ]
        compile_loss.build(y_true, y_pred)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 0.953333, atol=1e-5)
    @parameterized.parameters(True, False)
    def test_dict_output_case(self, broadcast):
        """Dict outputs, with per-output sample weights applied."""
        if broadcast:
            # Test broadcasting single loss to all outputs
            compile_loss = CompileLoss(
                loss="mse",
            )
        else:
            compile_loss = CompileLoss(
                loss={"a": "mse", "b": "mse"},
            )
        # Test symbolic build
        y_true = {
            "a": backend.KerasTensor((3, 4)),
            "b": backend.KerasTensor((3, 4)),
        }
        y_pred = {
            "a": backend.KerasTensor((3, 4)),
            "b": backend.KerasTensor((3, 4)),
        }
        compile_loss.build(y_true, y_pred)
        # Test eager build
        y_true = {
            "a": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        sample_weight = {
            "a": np.array([1.0, 2.0, 3.0]),
            "b": np.array([3.0, 2.0, 1.0]),
        }
        compile_loss.build(y_true, y_pred)
        value = compile_loss(y_true, y_pred, sample_weight)
        self.assertAllClose(value, 1.266666, atol=1e-5)
    def test_list_loss_dict_data(self):
        """A list loss config pairs with dict data via `output_names`."""
        compile_loss = CompileLoss(loss=["mse", "mae"], output_names=["b", "a"])
        y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
        y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
        compile_loss.build(y_true, y_pred)
        y_true = {
            "a": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 1.07666, atol=1e-5)
    def test_struct_loss(self):
        """A nested dict loss config aligns with nested dict outputs."""
        y_true = {
            "a": {
                "c": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
                "d": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
            },
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": {
                "c": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
                "d": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
            },
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        loss = {"a": {"c": "mse", "d": "mae"}}
        compile_loss = CompileLoss(loss=loss, output_names=["c", "d", "b"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_pred
        )
        compile_loss.build(y_true_symb, y_pred_symb)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 1.07666, atol=1e-5)
    def test_struct_loss_valid_weights(self):
        """Array loss weights scale the total loss but not the metrics."""
        y_true = {
            "a": np.array([1, 2]),
            "b": np.array([1, 2]),
        }
        y_pred = {
            "a": np.array([3, 4]),
            "b": np.array([3, 4]),
        }
        loss = {"a": "mse", "b": "mse"}
        compile_loss = CompileLoss(
            loss=loss,
            output_names=["a", "b"],
            loss_weights={
                "a": np.ones((2,)),
                "b": np.zeros((2,)),
            },
        )
        compile_loss.build(y_true, y_pred)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 4)
        # Metrics still report unweighted loss.
        a_loss_mean, b_loss_mean = compile_loss.metrics
        self.assertEqual(a_loss_mean.result(), 4)
        self.assertEqual(b_loss_mean.result(), 4)
    def test_struct_loss_invalid_weights(self):
        """A loss_weights count mismatch raises at build time."""
        y_true = {
            "a": {
                "c": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
                "d": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
            },
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": {
                "c": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
                "d": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
            },
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        loss = {"a": {"c": "mse", "d": "mae"}}
        compile_loss = CompileLoss(
            loss=loss, output_names=["c", "d", "b"], loss_weights=[1]
        )
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_pred
        )
        with self.assertRaisesRegex(
            ValueError, "must match the number of losses"
        ):
            compile_loss.build(y_true_symb, y_pred_symb)
    def test_struct_loss_indice_path(self):
        """Tuple sub-structures are addressed by integer indices."""
        y_true = {
            "a": (
                np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
                np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
            ),
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": (
                np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
                np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
            ),
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        loss = {"a": ["mse", "mae"]}
        compile_loss = CompileLoss(loss=loss, output_names=["c", "d", "b"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_pred
        )
        compile_loss.build(y_true_symb, y_pred_symb)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 1.07666, atol=1e-5)
    def test_struct_loss_namedtuple(self):
        """Namedtuple sub-structures in the loss config are supported."""
        Point = namedtuple("Point", ["x", "y"])
        y_true = {
            "a": Point(
                np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
                np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
            ),
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": Point(
                np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
                np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
            ),
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        loss = {"a": Point("mse", "mae")}
        compile_loss = CompileLoss(loss=loss, output_names=["c", "d", "b"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_pred
        )
        compile_loss.build(y_true_symb, y_pred_symb)
        value = compile_loss(y_true, y_pred)
        self.assertAllClose(value, 1.07666, atol=1e-5)
    def test_struct_loss_invalid_path(self):
        """A loss key with no matching output path raises KeyError."""
        y_true = {
            "a": {
                "c": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
                "d": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
            },
            "b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
        }
        y_pred = {
            "a": {
                "c": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
                "d": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
            },
            "b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
        }
        loss = {"a": {"c": "mse"}, "b": {"d": "mae"}}
        compile_loss = CompileLoss(loss=loss, output_names=["c", "d", "b"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((3, 4)), y_pred
        )
        with self.assertRaisesRegex(
            KeyError, "can't be found in the model's output"
        ):
            compile_loss.build(y_true_symb, y_pred_symb)
    def test_different_container_types(self):
        """Structures that differ only in container type reconcile."""
        y1, y2, y3 = np.array([[1]]), np.array([[2]]), np.array([[3]])
        y_true = ([{"a": y1}, {"b": ([y2], y3)}],)
        y_pred = [({"a": y1}, {"b": [(y2,), y3]})]
        loss = "mse"
        compile_loss = CompileLoss(loss=loss, output_names=["a", "b", "c"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((1, 1)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((1, 1)), y_pred
        )
        compile_loss.build(y_true_symb, y_pred_symb)
        compile_loss(y_true, y_pred)
    def test_structure_mismatch(self):
        """Irreconcilable y_true/y_pred structures raise ValueError."""
        y_true = [np.array([[1]]), np.array([[1]])]
        y_pred = [np.array([[1]]), np.array([[1]])]
        loss = ["mse", "mse"]
        compile_loss = CompileLoss(loss=loss, output_names=["a", "b"])
        y_true_symb = tree.map_structure(
            lambda _: backend.KerasTensor((1, 1)), y_true
        )
        y_pred_symb = tree.map_structure(
            lambda _: backend.KerasTensor((1, 1)), y_pred
        )
        compile_loss.build(y_true_symb, y_pred_symb)
        with self.assertRaisesRegex(
            ValueError, "y_true and y_pred have different structures."
        ):
            wrong_struc_y_true = [np.array([[1]])]
            compile_loss(wrong_struc_y_true, y_pred)
    @parameterized.parameters(
        ["mse", None, None],
        [None, "mse", None],
        [None, None, "mse"],
        [None, "mse", "mse"],
        ["mse", None, "mse"],
    )
    def test_y_true_partial_y_pred_span(self, *loss_conf):
        """y_true may cover only the outputs that actually have a loss."""
        loss_conf = list(loss_conf)
        ones = np.ones((320, 3))
        zeros = np.zeros((320, 3))
        twos = np.ones((320, 3)) * 2
        y_pred = [zeros, ones, twos]
        # y_true matches its own prediction, so every active loss is 0.
        y_true = [y for y, loss in zip(y_pred, loss_conf) if loss is not None]
        y_true = y_true[0] if len(y_true) == 1 else y_true
        compile_loss = CompileLoss(loss=loss_conf, output_names=["a", "b", "c"])
        # build call
        compile_loss(y_true, y_pred)
        # built call
        loss = compile_loss(y_true, y_pred)
        self.assertEqual(loss, 0.0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/compile_utils.py | keras/src/trainers/compile_utils.py | from collections import namedtuple
from keras.src import losses as losses_module
from keras.src import metrics as metrics_module
from keras.src import ops
from keras.src import tree
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.losses import loss as loss_module
from keras.src.utils.naming import get_object_name
from keras.src.utils.tracking import Tracker
class MetricsList(metrics_module.Metric):
    """Groups metrics that all consume the same `(y_true, y_pred)` pair.

    `CompileMetrics` builds one `MetricsList` per model output;
    `output_name` (if set) is used to prefix the result keys.
    """
    def __init__(self, metrics, name="metrics_list", output_name=None):
        super().__init__(name=name)
        self.metrics = metrics  # list of Metric instances
        self.output_name = output_name
    def update_state(self, y_true, y_pred, sample_weight=None):
        # Forward the same batch to every wrapped metric.
        for m in self.metrics:
            m.update_state(y_true, y_pred, sample_weight=sample_weight)
    def reset_state(self):
        for m in self.metrics:
            m.reset_state()
    def get_result(self):
        return {m.name: m.result() for m in self.metrics}
    def get_config(self):
        # Not serializable; rebuilt from `compile()` arguments.
        raise NotImplementedError
    @classmethod
    def from_config(cls, config):
        raise NotImplementedError
def is_function_like(value):
    """Return True if `value` is an acceptable metric/loss spec.

    Accepted forms are `None` (meaning "no metric for this output"),
    a string identifier, or any callable.
    """
    return value is None or isinstance(value, str) or callable(value)
def is_binary_or_sparse_categorical(y_true, y_pred):
    """Infer the classification flavor from static shapes.

    Returns a `(is_binary, is_sparse_categorical)` tuple derived purely
    from the ranks and trailing dimensions of `y_true` and `y_pred`.
    """
    true_rank = len(y_true.shape)
    pred_rank = len(y_pred.shape)
    true_last = y_true.shape[-1]
    pred_last = y_pred.shape[-1]
    # A single prediction unit is treated as binary classification.
    is_binary = pred_last == 1
    # Sparse targets: lower rank than predictions, or integer class ids
    # in a trailing dim of size 1 against multi-unit predictions.
    is_sparse_categorical = true_rank < pred_rank or (
        true_last == 1 and pred_last > 1
    )
    return is_binary, is_sparse_categorical
def get_metric(identifier, y_true, y_pred):
    """Resolve a metric identifier into a `Metric` instance.

    Handles the "accuracy"/"acc" convenience alias by inspecting the
    shapes of `y_true`/`y_pred` to pick binary, sparse-categorical, or
    categorical accuracy. Returns `None` when `identifier` is `None`.
    """
    if identifier is None:
        return None  # Ok to have no metric for an output.
    # Convenience feature for selecting b/t binary, categorical,
    # and sparse categorical.
    if str(identifier).lower() not in ["accuracy", "acc"]:
        metric_obj = metrics_module.get(identifier)
    else:
        is_binary, is_sparse_categorical = is_binary_or_sparse_categorical(
            y_true, y_pred
        )
        if is_binary:
            metric_obj = metrics_module.BinaryAccuracy(name=str(identifier))
        elif is_sparse_categorical:
            metric_obj = metrics_module.SparseCategoricalAccuracy(
                name=str(identifier)
            )
        else:
            metric_obj = metrics_module.CategoricalAccuracy(
                name=str(identifier)
            )
    # Keep the user-facing alias (e.g. "acc") as the reported name.
    if isinstance(identifier, str):
        metric_name = identifier
    else:
        metric_name = get_object_name(metric_obj)
    if not isinstance(metric_obj, metrics_module.Metric):
        # Plain functions are averaged per batch via MeanMetricWrapper.
        metric_obj = metrics_module.MeanMetricWrapper(metric_obj)
    metric_obj.name = metric_name
    return metric_obj
def get_loss(identifier, y_true, y_pred):
    """Resolve a loss identifier into a `Loss` instance.

    Handles the "crossentropy"/"ce" convenience alias by inspecting
    the shapes of `y_true`/`y_pred` to pick the binary, sparse, or
    dense categorical crossentropy. Returns `None` when `identifier`
    is `None`.
    """
    if identifier is None:
        return None  # Ok to have no loss for an output.
    # Convenience feature for selecting b/t binary, categorical,
    # and sparse categorical.
    if str(identifier).lower() not in ["crossentropy", "ce"]:
        loss_obj = losses_module.get(identifier)
    else:
        is_binary, is_sparse_categorical = is_binary_or_sparse_categorical(
            y_true, y_pred
        )
        if is_binary:
            loss_obj = losses_module.binary_crossentropy
        elif is_sparse_categorical:
            loss_obj = losses_module.sparse_categorical_crossentropy
        else:
            loss_obj = losses_module.categorical_crossentropy
    if not isinstance(loss_obj, losses_module.Loss):
        # Wrap plain functions so they carry a name and a reduction.
        if isinstance(identifier, str):
            loss_name = identifier
        else:
            loss_name = get_object_name(loss_obj)
        loss_obj = losses_module.LossFunctionWrapper(loss_obj, name=loss_name)
    return loss_obj
class CompileMetrics(metrics_module.Metric):
    def __init__(
        self,
        metrics,
        weighted_metrics,
        name="compile_metric",
        output_names=None,
    ):
        """Container for all metrics requested via `Model.compile()`.

        Args:
            metrics: list, tuple, or dict of metric specs (or `None`).
            weighted_metrics: like `metrics`, but updated with
                `sample_weight`.
            name: base name (note: unconditionally overridden below).
            output_names: optional model output names used to map
                dict-form `metrics` onto outputs.
        """
        super().__init__(name=name)
        if metrics and not isinstance(metrics, (list, tuple, dict)):
            raise ValueError(
                "Expected `metrics` argument to be a list, tuple, or dict. "
                f"Received instead: metrics={metrics} of type {type(metrics)}"
            )
        if weighted_metrics and not isinstance(
            weighted_metrics, (list, tuple, dict)
        ):
            raise ValueError(
                "Expected `weighted_metrics` argument to be a list, tuple, or "
                f"dict. Received instead: weighted_metrics={weighted_metrics} "
                f"of type {type(weighted_metrics)}"
            )
        self._user_metrics = metrics
        self._user_weighted_metrics = weighted_metrics
        self.built = False
        # NOTE(review): this overrides the `name` argument (default
        # "compile_metric") with "compile_metrics" in all cases.
        self.name = "compile_metrics"
        self.output_names = output_names
        # Resolved lazily in `build()` (may come from y_pred dict keys).
        self._resolved_output_names = None
@property
def metrics(self):
if not self.built:
return []
metrics = []
for m in self._flat_metrics + self._flat_weighted_metrics:
if isinstance(m, MetricsList):
metrics.extend(m.metrics)
elif m is not None:
metrics.append(m)
return metrics
@property
def variables(self):
# Avoiding relying on implicit tracking since
# CompileMetrics may be instantiated or built in a no tracking scope.
if not self.built:
return []
vars = []
for m in self.metrics:
if m is not None:
vars.extend(m.variables)
return vars
    def build(self, y_true, y_pred):
        """Instantiate concrete metric objects for the model's outputs.

        Resolves the output names (dict keys take precedence), flattens
        `y_true`/`y_pred` into aligned per-output lists, and builds one
        `MetricsList` per output for both `metrics` and
        `weighted_metrics`.
        """
        num_outputs = 1  # default
        # Resolve output names. If y_pred is a dict, prefer its keys.
        if isinstance(y_pred, dict):
            keys = sorted(list(y_pred.keys()))
            if self.output_names and set(self.output_names) == set(keys):
                # If there is a perfect match, use the user-provided order.
                output_names = self.output_names
            else:
                output_names = keys
        elif self.output_names:
            output_names = self.output_names
        elif isinstance(y_pred, (list, tuple)):
            num_outputs = len(y_pred)
            if all(hasattr(x, "_keras_history") for x in y_pred):
                # Symbolic tensors: use the producing operation's name.
                output_names = [x._keras_history.operation.name for x in y_pred]
            else:
                output_names = None
        else:
            output_names = None
        self._resolved_output_names = output_names
        if output_names:
            num_outputs = len(output_names)
        # Flatten to per-output lists aligned with the resolved names.
        y_pred = self._flatten_y(y_pred)
        y_true = self._flatten_y(y_true)
        metrics = self._user_metrics
        weighted_metrics = self._user_weighted_metrics
        self._flat_metrics = self._build_metrics_set(
            metrics,
            num_outputs,
            output_names,
            y_true,
            y_pred,
            argument_name="metrics",
        )
        self._flat_weighted_metrics = self._build_metrics_set(
            weighted_metrics,
            num_outputs,
            output_names,
            y_true,
            y_pred,
            argument_name="weighted_metrics",
        )
        self.built = True
    def _build_metrics_set(
        self, metrics, num_outputs, output_names, y_true, y_pred, argument_name
    ):
        """Normalize one metrics spec into a flat per-output list.

        Returns a list aligned with the model outputs; each entry is a
        `MetricsList` or `None` (output without metrics).
        `argument_name` ("metrics" or "weighted_metrics") is only used
        in error messages.
        """
        flat_metrics = []
        # Validate dict keys against the known output names up front.
        if isinstance(metrics, dict):
            for name in metrics.keys():
                if name not in output_names:
                    raise ValueError(
                        f"In the dict argument `{argument_name}`, key "
                        f"'{name}' does not correspond to any model "
                        f"output. Received:\n{argument_name}={metrics}"
                    )
        if num_outputs == 1:
            # Single output: all metrics apply to it.
            if not metrics:
                flat_metrics.append(None)
            else:
                if isinstance(metrics, dict):
                    metrics = tree.flatten(metrics)
                if not isinstance(metrics, list):
                    metrics = [metrics]
                if not all(is_function_like(m) for m in metrics):
                    raise ValueError(
                        f"Expected all entries in the `{argument_name}` list "
                        f"to be metric objects. Received instead:\n"
                        f"{argument_name}={metrics}"
                    )
                flat_metrics.append(
                    MetricsList(
                        [
                            get_metric(m, y_true[0], y_pred[0])
                            for m in metrics
                            if m is not None
                        ]
                    )
                )
        else:
            if isinstance(metrics, (list, tuple)):
                # One entry (metric or sublist of metrics) per output.
                if len(metrics) != len(y_pred):
                    raise ValueError(
                        "For a model with multiple outputs, "
                        f"when providing the `{argument_name}` argument as a "
                        "list, it should have as many entries as the model has "
                        f"outputs. Received:\n{argument_name}={metrics}\nof "
                        f"length {len(metrics)} whereas the model has "
                        f"{len(y_pred)} outputs."
                    )
                for idx, (mls, yt, yp) in enumerate(
                    zip(metrics, y_true, y_pred)
                ):
                    if not isinstance(mls, list):
                        mls = [mls]
                    name = output_names[idx] if output_names else None
                    if not all(is_function_like(e) for e in mls):
                        raise ValueError(
                            f"All entries in the sublists of the "
                            f"`{argument_name}` list should be metric objects. "
                            f"Found the following sublist with unknown "
                            f"types: {mls}"
                        )
                    flat_metrics.append(
                        MetricsList(
                            [
                                get_metric(m, yt, yp)
                                for m in mls
                                if m is not None
                            ],
                            output_name=name,
                        )
                    )
            elif isinstance(metrics, dict):
                if output_names is None:
                    raise ValueError(
                        f"Argument `{argument_name}` can only be provided as a "
                        "dict when the model also returns a dict of outputs. "
                        f"Received {argument_name}={metrics}"
                    )
                # Normalize each dict value to a validated list.
                for name in metrics.keys():
                    if not isinstance(metrics[name], list):
                        metrics[name] = [metrics[name]]
                    if not all(is_function_like(e) for e in metrics[name]):
                        raise ValueError(
                            f"All entries in the sublists of the "
                            f"`{argument_name}` dict should be metric objects. "
                            f"At key '{name}', found the following sublist "
                            f"with unknown types: {metrics[name]}"
                        )
                # Align with outputs; outputs without metrics get None.
                for name, yt, yp in zip(output_names, y_true, y_pred):
                    if name in metrics:
                        flat_metrics.append(
                            MetricsList(
                                [
                                    get_metric(m, yt, yp)
                                    for m in metrics[name]
                                    if m is not None
                                ],
                                output_name=name,
                            )
                        )
                    else:
                        flat_metrics.append(None)
        return flat_metrics
def _flatten_y(self, y):
names = self._resolved_output_names
if isinstance(y, dict) and names:
result = []
for name in names:
if name in y:
result.append(y[name])
return result
return tree.flatten(y)
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate one batch into every tracked metric.

        Unweighted metrics ignore `sample_weight`; weighted metrics
        receive it (broadcast across outputs when a single weight array
        is given for a multi-output model).
        """
        if not self.built:
            self.build(y_true, y_pred)
        y_true = self._flatten_y(y_true)
        y_pred = self._flatten_y(y_pred)
        for m, y_t, y_p in zip(self._flat_metrics, y_true, y_pred):
            if m:
                m.update_state(y_t, y_p)
        if sample_weight is not None:
            sample_weight = self._flatten_y(sample_weight)
            # For multi-outputs, repeat sample weights for n outputs.
            if len(sample_weight) < len(y_true):
                sample_weight = [sample_weight[0] for _ in range(len(y_true))]
        else:
            sample_weight = [None for _ in range(len(y_true))]
        for m, y_t, y_p, s_w in zip(
            self._flat_weighted_metrics, y_true, y_pred, sample_weight
        ):
            if m:
                m.update_state(y_t, y_p, s_w)
def reset_state(self):
if not self.built:
return
for m in self._flat_metrics:
if m:
m.reset_state()
for m in self._flat_weighted_metrics:
if m:
m.reset_state()
    def result(self):
        """Return `{name: value}` for all metrics, de-duplicating names.

        Result keys are prefixed with the output name when available.
        A weighted metric colliding with an unweighted metric of the
        same name is exposed with a `weighted_` prefix; any remaining
        collision gets a numeric suffix.
        """
        if not self.built:
            raise ValueError(
                "Cannot get result() since the metric has not yet been built."
            )
        results = {}
        unique_name_counters = {}
        for mls in self._flat_metrics:
            if not mls:
                continue
            for m in mls.metrics:
                name = m.name
                if mls.output_name:
                    name = f"{mls.output_name}_{name}"
                if name not in unique_name_counters:
                    results[name] = m.result()
                    unique_name_counters[name] = 1
                else:
                    # Name collision: disambiguate with a running index.
                    index = unique_name_counters[name]
                    unique_name_counters[name] += 1
                    name = f"{name}_{index}"
                    results[name] = m.result()
        for mls in self._flat_weighted_metrics:
            if not mls:
                continue
            for m in mls.metrics:
                name = m.name
                if mls.output_name:
                    name = f"{mls.output_name}_{name}"
                if name not in unique_name_counters:
                    results[name] = m.result()
                    unique_name_counters[name] = 1
                else:
                    # Collides with an unweighted metric: prefix it.
                    name = f"weighted_{m.name}"
                    if mls.output_name:
                        name = f"{mls.output_name}_{name}"
                    if name not in unique_name_counters:
                        unique_name_counters[name] = 1
                    else:
                        index = unique_name_counters[name]
                        unique_name_counters[name] += 1
                        name = f"{name}_{index}"
                    results[name] = m.result()
        return results
    def get_config(self):
        # Not serializable: rebuilt from `Model.compile()` arguments.
        raise NotImplementedError
    @classmethod
    def from_config(cls, config):
        # See `get_config`: this wrapper is intentionally not serializable.
        raise NotImplementedError
class CompileLoss(losses_module.Loss):
Loss = namedtuple("Loss", ["path", "loss", "loss_weights", "name"])
    def __init__(
        self,
        loss,
        loss_weights=None,
        reduction="sum_over_batch_size",
        output_names=None,
    ):
        """Container resolving the `loss`/`loss_weights` compile() args.

        Args:
            loss: loss spec — callable, string, `Loss`, or a (possibly
                nested) list/tuple/dict of such specs.
            loss_weights: optional float or structure of weights
                matching `loss`.
            reduction: reduction mode passed to the base `Loss`.
            output_names: optional model output names, used to map
                dict-form losses onto outputs.
        """
        if loss_weights and not isinstance(
            loss_weights, (list, tuple, dict, float)
        ):
            raise ValueError(
                "Expected `loss_weights` argument to be a float "
                "(single output case) or a list, tuple, or "
                "dict (multiple output case). "
                f"Received instead: loss_weights={loss_weights} "
                f"of type {type(loss_weights)}"
            )
        self._user_loss = loss
        self._user_loss_weights = loss_weights
        self.built = False
        self.output_names = output_names
        super().__init__(name="compile_loss", reduction=reduction)
        # Use `Tracker` to track metrics for individual losses.
        self._metrics = []
        self._tracker = Tracker(
            {
                "metrics": (
                    lambda x: isinstance(x, metrics_module.Metric),
                    self._metrics,
                )
            }
        )
        # All three are populated by `build()`.
        self._flat_losses = None
        self._y_pred_build_structure = None
        self._y_true_build_structure = None
    @property
    def metrics(self):
        # Per-loss `Mean` trackers (only populated for multi-loss models).
        return self._metrics
@property
def variables(self):
vars = []
for m in self.metrics:
vars.extend(m.variables)
return vars
    def _build_nested(self, y_true, y_pred, loss, output_names, current_path):
        """Recursively align the loss structure with `y_pred`.

        Appends a `CompileLoss.Loss(path, loss, weight, name)` record to
        `self._flat_losses` for every leaf loss found. `current_path`
        is the tuple of keys/indices leading to the current
        sub-structure; it doubles as the lookup path at call time.
        """
        flat_y_pred = tree.flatten(y_pred)
        if not tree.is_nested(loss):
            # Leaf case: a single (possibly weighted) loss at this path.
            _loss = loss.loss
            if _loss is None:
                return
            loss_weight = loss.weight
            resolved_loss = get_loss(_loss, y_true, y_pred)
            name_path = current_path
            if not tree.is_nested(output_names):
                if output_names is not None:
                    output_name = output_names
                else:
                    output_name = resolved_loss.name
                # Use the output name as the last path component.
                if len(name_path) == 0:
                    name_path = (output_name,)
                elif isinstance(name_path[-1], int):
                    name_path = name_path[:-1] + (output_name,)
            name = "/".join([str(path) for path in name_path])
            if name == "":
                # Root-level loss with no path: join all output names.
                if isinstance(output_names, dict):
                    flat_output_names = list(output_names.keys())
                else:
                    flat_output_names = tree.flatten(output_names)
                name = "_".join(flat_output_names)
            self._flat_losses.append(
                CompileLoss.Loss(current_path, resolved_loss, loss_weight, name)
            )
            return
        elif (
            issubclass(type(loss), (list, tuple))
            and all([not tree.is_nested(_loss) for _loss in loss])
            and len(loss) == len(flat_y_pred)
        ):
            # A flat list of leaf losses matching the flattened outputs:
            # distribute them across the y_pred structure.
            loss = tree.pack_sequence_as(y_pred, loss)
        elif issubclass(type(loss), (list, tuple)) and not isinstance(
            y_pred, type(loss)
        ):
            # Several losses all applied to the same (non-list) output.
            for _loss in loss:
                self._build_nested(
                    y_true,
                    y_pred,
                    _loss,
                    output_names,
                    current_path,
                )
            return
        if not tree.is_nested(loss):
            return self._build_nested(
                y_true, y_pred, loss, output_names, current_path
            )
        if not isinstance(loss, type(y_pred)):
            raise KeyError(
                f"The path: {current_path} in "
                "the `loss` argument, can't be found in "
                "the model's output (`y_pred`)."
            )
        # shallow traverse the loss config
        if isinstance(loss, dict):
            iterator = loss.items()
            def key_check_fn(key, objs):
                # Key must exist in every structure being traversed.
                return all(
                    [isinstance(obj, dict) and key in obj for obj in objs]
                )
        elif issubclass(type(loss), (list, tuple)):
            iterator = enumerate(loss)
            def key_check_fn(key, objs):
                # Index must be valid in every structure being traversed.
                return all(
                    [
                        issubclass(type(obj), (list, tuple)) and key < len(obj)
                        for obj in objs
                    ]
                )
        else:
            raise TypeError(
                f"Unsupported type {type(loss)} in the `loss` configuration."
            )
        for key, _loss in iterator:
            if _loss is None:
                continue
            if not key_check_fn(key, (y_true, y_pred)):
                raise KeyError(
                    f"The path: {current_path + (key,)} in "
                    "the `loss` argument, can't be found in "
                    "either the model's output (`y_pred`) or in the "
                    "labels (`y_true`)."
                )
            self._build_nested(
                y_true[key],
                y_pred[key],
                _loss,
                output_names[key],
                current_path + (key,),
            )
def build(self, y_true, y_pred):
loss = self._user_loss
loss_weights = self._user_loss_weights
flat_output_names = self.output_names
if (
self.output_names
and isinstance(self._user_loss, dict)
and not isinstance(y_pred, dict)
):
if set(self.output_names) == set(self._user_loss.keys()):
loss = [self._user_loss[name] for name in self.output_names]
if isinstance(self._user_loss_weights, dict):
loss_weights = [
self._user_loss_weights[name]
for name in self.output_names
]
else:
raise ValueError(
f"Expected keys {self.output_names} in loss dict, but "
f"found loss.keys()={list(self._user_loss.keys())}"
)
# Pytree leaf container
class WeightedLoss:
def __new__(cls, loss, weight):
if loss is None:
return None
return object.__new__(cls)
def __init__(self, loss, weight):
self.loss = loss
self.weight = weight
# pack the losses and the weights together
if loss_weights is not None:
try:
tree.assert_same_structure(loss, loss_weights)
except ValueError:
flat_loss_weights = tree.flatten(loss_weights)
if len(tree.flatten(loss)) != len(flat_loss_weights):
raise ValueError(
f"`loss_weights` must match the number of losses, "
f"got {len(tree.flatten(loss))} losses "
f"and {len(loss_weights)} weights."
)
loss_weights = tree.pack_sequence_as(loss, flat_loss_weights)
loss = tree.map_structure(
lambda _loss, _weight: WeightedLoss(_loss, _weight),
loss,
loss_weights,
)
else:
loss = tree.map_structure(
lambda _loss: WeightedLoss(_loss, None), loss
)
self._flat_losses = []
if (
isinstance(loss, dict)
and issubclass(type(y_pred), (list, tuple))
and set(loss.keys()) == set(flat_output_names)
and len(y_pred) == len(flat_output_names)
):
y_pred = {name: y_p for name, y_p in zip(flat_output_names, y_pred)}
y_true = {name: y_t for name, y_t in zip(flat_output_names, y_true)}
elif (
isinstance(loss, dict)
and not tree.is_nested(y_pred)
and set(loss.keys()) == set(flat_output_names)
and len(flat_output_names) == 1
):
y_pred = {
name: y_p for name, y_p in zip(flat_output_names, [y_pred])
}
y_true = {
name: y_t for name, y_t in zip(flat_output_names, [y_true])
}
try:
output_names = tree.pack_sequence_as(y_pred, flat_output_names)
except:
inferred_flat_output_names = self._get_y_pred_output_names(y_pred)
output_names = tree.pack_sequence_as(
y_pred, inferred_flat_output_names
)
if not tree.is_nested(loss):
loss = tree.map_structure(lambda x: loss, y_pred)
self._build_nested(y_true, y_pred, loss, output_names, ())
# Add `Mean` metric to the tracker for each loss.
if len(self._flat_losses) > 1:
for _loss in self._flat_losses:
name = f"{_loss.name}_loss"
self._tracker.add_to_store(
"metrics", metrics_module.Mean(name=name)
)
self._y_pred_build_structure = tree.map_structure(
lambda x: None, y_pred
)
self._y_true_build_structure = tree.map_structure(
lambda x: None, y_true
)
self.built = True
def _get_y_pred_output_names(self, y_pred):
flat_y_pred = tree.flatten(y_pred)
if all((isinstance(x, KerasTensor) for x in flat_y_pred)):
output_names = []
for tensor in flat_y_pred:
if hasattr(tensor, "_keras_history"):
output_names.append(tensor._keras_history.operation.name)
else:
output_names.append(tensor.name)
else:
output_names = [None] * len(flat_y_pred)
return output_names
    def __call__(self, y_true, y_pred, sample_weight=None):
        # Scope op names under "compile_loss" for cleaner graphs/traces.
        with ops.name_scope(self.name):
            return self.call(y_true, y_pred, sample_weight)
    def call(self, y_true, y_pred, sample_weight=None):
        """Compute the total weighted loss for one batch.

        Reconciles `y_true`'s structure with `y_pred`, evaluates every
        configured loss at its recorded path, updates the per-loss
        `Mean` trackers with the *unweighted* values, and returns the
        sum of the weighted per-output losses (or `None` if no losses
        were configured).
        """
        def resolve_path(path, object):
            # Walk a tuple of keys/indices into a nested structure.
            for _path in path:
                object = object[_path]
            return object
        if not tree.is_nested(y_true) and not tree.is_nested(y_pred):
            # Fast path: single output case / no loss-tracking metric.
            if not self.built:
                self.build(y_true, y_pred)
            # Although we are in the fast path, we still need to iterate
            # through the losses to prevent the torch compiler from failing.
            loss_values = []
            for path, loss_fn, loss_weight, _ in self._flat_losses:
                y_t, y_p = (
                    resolve_path(path, y_true),
                    resolve_path(path, y_pred),
                )
                if sample_weight is not None and tree.is_nested(sample_weight):
                    _sample_weight = resolve_path(path, sample_weight)
                else:
                    _sample_weight = sample_weight
                value = ops.cast(
                    loss_fn(y_t, y_p, _sample_weight), dtype=self.dtype
                )
                if loss_weight is not None:
                    value = ops.multiply(value, loss_weight)
                loss_values.append(value)
            return loss_values[0]
        try:
            tree.assert_same_structure(y_pred, y_true)
        except ValueError:
            # Check case where y_true is either flat or leaf
            if (
                not tree.is_nested(y_true)
                and hasattr(y_pred, "__len__")
                and len(y_pred) == 1
            ):
                y_true = [y_true]
            # Check case where y_pred is list/tuple and y_true is dict
            elif isinstance(y_pred, (list, tuple)) and isinstance(y_true, dict):
                if set(self.output_names) == set(y_true.keys()):
                    y_true = [y_true[name] for name in self.output_names]
            try:
                y_true = tree.pack_sequence_as(y_pred, y_true)
            # NOTE(review): the bare `except:` clauses below also catch
            # KeyboardInterrupt/SystemExit; consider `except Exception:`.
            except:
                # Check case where y_true has the same structure but uses
                # different (but reconcilable) container types,
                # e.g `list` vs `tuple`.
                try:
                    tree.assert_same_paths(y_true, y_pred)
                    y_true = tree.pack_sequence_as(y_pred, tree.flatten(y_true))
                except:
                    try:
                        # Check case where loss is partially defined over y_pred
                        flat_y_true = tree.flatten(y_true)
                        flat_loss = tree.flatten(self._user_loss)
                        flat_loss_non_nones = [
                            (i, loss)
                            for i, loss in enumerate(flat_loss)
                            if loss is not None
                        ]
                        assert len(flat_y_true) == len(flat_loss_non_nones)
                        # Scatter the provided labels back into the full
                        # output structure at the positions with a loss.
                        y_true = [None] * len(flat_loss)
                        for y_t, (i, loss) in zip(
                            flat_y_true, flat_loss_non_nones
                        ):
                            y_true[i] = y_t
                        y_true = tree.pack_sequence_as(self._user_loss, y_true)
                    except:
                        y_true_struct = tree.map_structure(
                            lambda _: "*", y_true
                        )
                        y_pred_struct = tree.map_structure(
                            lambda _: "*", y_pred
                        )
                        raise ValueError(
                            "y_true and y_pred have different structures.\n"
                            f"y_true: {y_true_struct}\n"
                            f"y_pred: {y_pred_struct}\n"
                        )
        if not self.built:
            self.build(y_true, y_pred)
        # Re-pack inputs into the exact structures recorded at build time.
        try:
            tree.assert_same_structure(self._y_pred_build_structure, y_pred)
        except ValueError:
            y_pred = tree.pack_sequence_as(
                self._y_pred_build_structure, tree.flatten(y_pred)
            )
        try:
            tree.assert_same_structure(self._y_true_build_structure, y_true)
        except ValueError:
            y_true = tree.pack_sequence_as(
                self._y_true_build_structure, tree.flatten(y_true)
            )
        # We need to add a dummy `None` if the model has only a single output.
        metrics = [None] if len(self.metrics) == 0 else self.metrics
        # Iterate all losses in flat form.
        loss_values = []
        for (path, loss_fn, loss_weight, _), metric in zip(
            self._flat_losses, metrics
        ):
            y_t, y_p = resolve_path(path, y_true), resolve_path(path, y_pred)
            if sample_weight is not None and tree.is_nested(sample_weight):
                _sample_weight = resolve_path(path, sample_weight)
            else:
                _sample_weight = sample_weight
            value = ops.cast(
                loss_fn(y_t, y_p, _sample_weight), dtype=self.dtype
            )
            # Record *unweighted* individual losses.
            if metric:
                metric.update_state(
                    loss_module.unscale_loss_for_distribution(value),
                    sample_weight=tree.flatten(y_p)[0].shape[0],
                )
            if loss_weight is not None:
                value = ops.multiply(value, loss_weight)
            loss_values.append(value)
        if loss_values:
            total_loss = sum(loss_values)
            return total_loss
        return None
def get_config(self):
raise NotImplementedError
@classmethod
def from_config(cls, config):
raise NotImplementedError
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/epoch_iterator.py | keras/src/trainers/epoch_iterator.py | """
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import contextlib
import warnings
from keras.src.backend import config
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
# Possibly cap steps_per_epoch for debugging runs.
max_steps_per_epoch = config.max_steps_per_epoch()
if max_steps_per_epoch:
if not steps_per_epoch or max_steps_per_epoch < steps_per_epoch:
warnings.warn(
"Limiting steps_per_epoch to %d" % max_steps_per_epoch
)
steps_per_epoch = max_steps_per_epoch
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
self._current_iterator = None
self._epoch_iterator = None
self._steps_seen = 0
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def _interrupted_warning(self):
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
def reset(self):
self._current_iterator = None
self._num_batches = self.data_adapter.num_batches
self._steps_seen = 0
self._epoch_iterator = None
self.data_adapter.on_epoch_end()
def _enumerate_iterator(self):
self.data_adapter.on_epoch_begin()
steps_per_epoch = self.steps_per_epoch or self._num_batches or -1
if steps_per_epoch > 0:
if self._current_iterator is None or self.steps_per_epoch is None:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
for step in range(0, steps_per_epoch, self.steps_per_execution):
if self._num_batches and self._steps_seen >= self._num_batches:
if self.steps_per_epoch:
self._interrupted_warning()
break
self._steps_seen += self.steps_per_execution
yield (
step,
step + self.steps_per_execution - 1,
self._current_iterator,
)
if self._num_batches and self._steps_seen >= self._num_batches:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
else:
iterator = iter(self._get_iterator())
step = -self.steps_per_execution
while True:
step += self.steps_per_execution
self._steps_seen = step + self.steps_per_execution
yield step, step + self.steps_per_execution - 1, iterator
self.data_adapter.on_epoch_end()
def __iter__(self):
self._epoch_iterator = self._enumerate_iterator()
return self
def __next__(self):
buffer = []
begin_step, end_step, iterator = next(self._epoch_iterator)
with self.catch_stop_iteration():
for _ in range(self.steps_per_execution):
data = next(iterator)
buffer.append(data)
return begin_step, end_step, buffer
if buffer:
return begin_step, end_step, buffer
raise StopIteration
def enumerate_epoch(self):
for begin_step, end_step, data in self:
yield begin_step, end_step, data
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
except StopIteration:
if self._num_batches is None:
self._num_batches = self._steps_seen
self._interrupted_warning()
self._current_iterator = None
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/epoch_iterator_test.py | keras/src/trainers/epoch_iterator_test.py | import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.trainers import data_adapters
from keras.src.trainers import epoch_iterator
class TestEpochIterator(testing.TestCase):
@parameterized.named_parameters(
[("iterator", "iterator"), ("enumerate_epoch", "enumerate_epoch")]
)
def test_basic_flow(self, call_type):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
iterator = epoch_iterator.EpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
)
steps_seen = []
if call_type == "iterator":
generator = iterator
else:
generator = iterator.enumerate_epoch()
for begin_step, end_step, batch in generator:
batch = batch[0]
steps_seen.append(begin_step)
self.assertEqual(begin_step, end_step)
self.assertEqual(len(batch), 3)
self.assertIsInstance(batch[0], np.ndarray)
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
def test_insufficient_data(self):
batch_size = 8
steps_per_epoch = 6
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.arange(dataset_size).reshape((dataset_size, 1))
y = x * 2
iterator = epoch_iterator.EpochIterator(
x=x,
y=y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
)
steps_seen = []
with pytest.warns(match="Your input ran out of data"):
for step, _, _ in iterator:
steps_seen.append(step)
self.assertLen(steps_seen, steps_per_epoch - 2)
self.assertIsInstance(iterator, epoch_iterator.EpochIterator)
def test_unsupported_y_arg_tfdata(self):
with self.assertRaisesRegex(ValueError, "`y` should not be passed"):
x = tf.data.Dataset.from_tensor_slices(np.random.random((100, 16)))
y = np.random.random((100, 4))
_ = epoch_iterator.EpochIterator(x=x, y=y)
def test_unsupported_sample_weights_arg_tfdata(self):
with self.assertRaisesRegex(
ValueError, "`sample_weights` should not be passed"
):
x = tf.data.Dataset.from_tensor_slices(np.random.random((100, 16)))
sample_weights = np.random.random((100,))
_ = epoch_iterator.EpochIterator(x=x, sample_weight=sample_weights)
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((64, 2)), np.random.random((64, 1))
)
torch_dataloader = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
iterator = epoch_iterator.EpochIterator(torch_dataloader)
for _, _, batch in iterator:
batch = batch[0]
self.assertEqual(batch[0].shape, (8, 2))
self.assertEqual(batch[1].shape, (8, 1))
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_unsupported_y_arg_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((100, 16)), np.random.random((100, 4))
)
x = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
y = np.random.random((100, 4))
with self.assertRaisesRegex(
ValueError,
"When providing `x` as a torch DataLoader, `y` should not",
):
_ = epoch_iterator.EpochIterator(x=x, y=y)
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_unsupported_sample_weights_arg_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((100, 16)), np.random.random((100, 4))
)
x = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
sample_weights = np.random.random((100,))
with self.assertRaisesRegex(
ValueError,
"When providing `x` as a torch DataLoader, `sample_weights`",
):
_ = epoch_iterator.EpochIterator(x=x, sample_weight=sample_weights)
def test_python_generator_input(self):
def generator_example():
for i in range(100):
yield (np.array([i]), np.array([i * 2]))
x = generator_example()
epoch_iter = epoch_iterator.EpochIterator(x=x)
self.assertIsInstance(
epoch_iter.data_adapter,
data_adapters.GeneratorDataAdapter,
)
def test_unrecognized_data_type(self):
x = "unsupported_data"
with self.assertRaisesRegex(ValueError, "Unrecognized data type"):
_ = epoch_iterator.EpochIterator(x=x)
@parameterized.named_parameters(
[
{"testcase_name": "infinite", "infinite": True},
{"testcase_name": "finite", "infinite": False},
]
)
def test_epoch_callbacks(self, infinite):
class TestPyDataset(data_adapters.py_dataset_adapter.PyDataset):
def __init__(
self,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
infinite=False,
):
super().__init__(workers, use_multiprocessing, max_queue_size)
self.data = np.random.rand(64, 2)
self.batch_size = 16
self.infinite = infinite
# check that callbacks are called in the correct order
self.tracker = []
@property
def num_batches(self):
if self.infinite:
return None
return len(self.data) // self.batch_size
def on_epoch_begin(self):
self.tracker.append(1)
def __getitem__(self, index):
idx = index % 2
return self.data[
idx * self.batch_size : (idx + 1) * self.batch_size
]
def on_epoch_end(self):
self.tracker.append(2)
ds = TestPyDataset(infinite=infinite)
epoch_iter = epoch_iterator.EpochIterator(x=ds, steps_per_epoch=10)
num_epochs = 5
for epoch in range(num_epochs):
for _, _, _ in epoch_iter:
pass
self.assertAllEqual(ds.tracker, [1, 2] * num_epochs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/trainer.py | keras/src/trainers/trainer.py | import inspect
import platform
import warnings
from keras.src import backend
from keras.src import metrics as metrics_module
from keras.src import ops
from keras.src import optimizers
from keras.src import tree
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.saving import serialization_lib
from keras.src.trainers.compile_utils import CompileLoss
from keras.src.trainers.compile_utils import CompileMetrics
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.utils import python_utils
from keras.src.utils import traceback_utils
from keras.src.utils import tracking
class Trainer:
def __init__(self):
self._lock = False
self._run_eagerly = False
self._jit_compile = None
self.compiled = False
self.loss = None
self.steps_per_execution = 1
# Can be set by callbacks in on_train_begin
self._initial_epoch = None
self._compute_loss_has_training_arg = (
"training" in inspect.signature(self.compute_loss).parameters
)
# Placeholders used in `compile`
self._compile_loss = None
self._compile_metrics = None
self._loss_tracker = None
@traceback_utils.filter_traceback
@tracking.no_automatic_dependency_tracking
def compile(
self,
optimizer="rmsprop",
loss=None,
loss_weights=None,
metrics=None,
weighted_metrics=None,
run_eagerly=False,
steps_per_execution=1,
jit_compile="auto",
auto_scale_loss=True,
):
"""Configures the model for training.
Example:
```python
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.BinaryCrossentropy(),
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
],
)
```
Args:
optimizer: String (name of optimizer) or optimizer instance. See
`keras.optimizers`.
loss: Loss function. May be a string (name of loss function), or
a `keras.losses.Loss` instance. See `keras.losses`. A
loss function is any callable with the signature
`loss = fn(y_true, y_pred)`, where `y_true` are the ground truth
values, and `y_pred` are the model's predictions.
`y_true` should have shape `(batch_size, d0, .. dN)`
(except in the case of sparse loss functions such as
sparse categorical crossentropy which expects integer arrays of
shape `(batch_size, d0, .. dN-1)`).
`y_pred` should have shape `(batch_size, d0, .. dN)`.
The loss function should return a float tensor.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions of
different model outputs. The loss value that will be minimized
by the model will then be the *weighted sum* of all individual
losses, weighted by the `loss_weights` coefficients. If a list,
it is expected to have a 1:1 mapping to the model's outputs. If
a dict, it is expected to map output names (strings) to scalar
coefficients.
metrics: List of metrics to be evaluated by the model during
training and testing. Each of this can be a string (name of a
built-in function), function or a `keras.metrics.Metric`
instance. See `keras.metrics`. Typically you will use
`metrics=['accuracy']`. A function is any callable with the
signature `result = fn(y_true, _pred)`. To specify different
metrics for different outputs of a multi-output model, you could
also pass a dictionary, such as
`metrics={'a':'accuracy', 'b':['accuracy', 'mse']}`.
You can also pass a list to specify a metric or a list of
metrics for each output, such as
`metrics=[['accuracy'], ['accuracy', 'mse']]`
or `metrics=['accuracy', ['accuracy', 'mse']]`. When you pass
the strings 'accuracy' or 'acc', we convert this to one of
`keras.metrics.BinaryAccuracy`,
`keras.metrics.CategoricalAccuracy`,
`keras.metrics.SparseCategoricalAccuracy` based on the
shapes of the targets and of the model output. A similar
conversion is done for the strings `"crossentropy"`
and `"ce"` as well.
The metrics passed here are evaluated without sample weighting;
if you would like sample weighting to apply, you can specify
your metrics via the `weighted_metrics` argument instead.
weighted_metrics: List of metrics to be evaluated and weighted by
`sample_weight` or `class_weight` during training and testing.
run_eagerly: Bool. If `True`, this model's forward pass
will never be compiled. It is recommended to leave this
as `False` when training (for best performance),
and to set it to `True` when debugging.
steps_per_execution: Int. The number of batches to run
during each a single compiled function call. Running multiple
batches inside a single compiled function call can
greatly improve performance on TPUs or small models with a large
Python overhead. At most, one full epoch will be run each
execution. If a number larger than the size of the epoch is
passed, the execution will be truncated to the size of the
epoch. Note that if `steps_per_execution` is set to `N`,
`Callback.on_batch_begin` and `Callback.on_batch_end` methods
will only be called every `N` batches (i.e. before/after
each compiled function execution).
Not supported with the PyTorch backend.
jit_compile: Bool or `"auto"`. Whether to use XLA compilation when
compiling a model. For `jax` and `tensorflow` backends,
`jit_compile="auto"` enables XLA compilation if the model
supports it, and disabled otherwise.
For `torch` backend, `"auto"` will default to eager
execution and `jit_compile=True` will run with `torch.compile`
with the `"inductor"` backend.
auto_scale_loss: Bool. If `True` and the model dtype policy is
`"mixed_float16"`, the passed optimizer will be automatically
wrapped in a `LossScaleOptimizer`, which will dynamically
scale the loss to prevent underflow.
"""
optimizer = optimizers.get(optimizer)
self.optimizer = optimizer
if (
auto_scale_loss
and self.dtype_policy.name == "mixed_float16"
and self.optimizer
and not isinstance(self.optimizer, LossScaleOptimizer)
):
self.optimizer = LossScaleOptimizer(
self.optimizer, name="loss_scale_optimizer"
)
if hasattr(self, "output_names"):
output_names = self.output_names
else:
output_names = None
if loss is not None:
self._compile_loss = CompileLoss(
loss, loss_weights, output_names=output_names
)
self.loss = loss
if metrics is not None or weighted_metrics is not None:
self._compile_metrics = CompileMetrics(
metrics, weighted_metrics, output_names=output_names
)
if jit_compile == "auto":
if run_eagerly:
jit_compile = False
else:
jit_compile = self._resolve_auto_jit_compile()
if jit_compile and run_eagerly:
jit_compile = False
warnings.warn(
"If `run_eagerly` is True, then `jit_compile` "
"cannot also be True. Disabling `jit_compile`.",
stacklevel=2,
)
self.jit_compile = jit_compile
self.run_eagerly = run_eagerly
self.stop_training = False
self.compiled = True
self._loss_tracker = metrics_module.Mean(name="loss")
self.steps_per_execution = steps_per_execution
self.train_function = None
self.test_function = None
self.predict_function = None
self._compile_config = serialization_lib.SerializableDict(
optimizer=optimizer,
loss=loss,
loss_weights=loss_weights,
metrics=metrics,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
jit_compile=jit_compile,
)
@property
def jit_compile(self):
if self._jit_compile is None:
# Value was never set. Resolve it now.
self._jit_compile = self._resolve_auto_jit_compile()
return self._jit_compile
@jit_compile.setter
def jit_compile(self, value):
if value and not model_supports_jit(self):
warnings.warn(
"Model doesn't support `jit_compile=True`. "
"Proceeding with `jit_compile=False`."
)
self._jit_compile = False
else:
self._jit_compile = value
def _resolve_auto_jit_compile(self):
if backend.backend() == "torch":
# jit_compile = "auto" with the pytorch backend defaults to eager
return False
if backend.backend() == "tensorflow":
import tensorflow as tf
devices = tf.config.list_physical_devices()
if not list(filter(lambda x: x.device_type != "CPU", devices)):
# Disable XLA on CPU-only machines.
return False
if self._distribute_strategy:
# Disable XLA with tf.distribute
return False
if model_supports_jit(self):
return True
return False
@property
def run_eagerly(self):
return self._run_eagerly
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
@property
def metrics(self):
# Order: loss tracker, individual loss trackers, compiled metrics,
# custom metrics, sublayer metrics.
metrics = []
if self.compiled:
if self._loss_tracker is not None:
metrics.append(self._loss_tracker)
if self._compile_metrics is not None:
metrics.append(self._compile_metrics)
if self._compile_loss is not None:
metrics.extend(self._compile_loss.metrics)
metrics.extend(self._metrics)
for layer in self._flatten_layers(include_self=False):
if isinstance(layer, Trainer):
# All Trainer-related metrics in sublayers should be ignored
# because a new Trainer has been instantiated.
continue
metrics.extend(layer.metrics)
return metrics
@property
def metrics_names(self):
return [m.name for m in self.metrics]
def reset_metrics(self):
for m in self.metrics:
m.reset_state()
def _get_own_metrics(self):
metrics = []
if self._loss_tracker is not None:
metrics.append(self._loss_tracker)
if self._compile_metrics is not None:
metrics.append(self._compile_metrics)
if self._compile_loss is not None:
metrics.extend(self._compile_loss.metrics)
metrics.extend(self._metrics)
return metrics
def compute_loss(
self,
x=None,
y=None,
y_pred=None,
sample_weight=None,
training=True,
):
"""Compute the total loss, validate it, and return it.
Subclasses can optionally override this method to provide custom loss
computation logic.
Example:
```python
class MyModel(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_tracker = metrics.Mean(name='loss')
def compute_loss(self, x, y, y_pred, sample_weight, training=True):
loss = ops.mean((y_pred - y) ** 2)
loss += ops.sum(self.losses)
self.loss_tracker.update_state(loss)
return loss
def reset_metrics(self):
self.loss_tracker.reset_state()
@property
def metrics(self):
return [self.loss_tracker]
inputs = layers.Input(shape=(10,), name='my_input')
outputs = layers.Dense(10)(inputs)
model = MyModel(inputs, outputs)
model.add_loss(ops.sum(outputs))
optimizer = SGD()
model.compile(optimizer, loss='mse', steps_per_execution=10)
dataset = ...
model.fit(dataset, epochs=2, steps_per_epoch=10)
print(f"Custom loss: {model.loss_tracker.result()}")
```
Args:
x: Input data.
y: Target data.
y_pred: Predictions returned by the model (output of `model(x)`)
sample_weight: Sample weights for weighting the loss function.
training: Whether we are training or evaluating the model.
Returns:
The total loss as a scalar tensor, or `None` if no loss results
(which is the case when called by `Model.test_step`).
"""
# The default implementation does not use `x` or `training`.
del x
del training
losses = []
if self._compile_loss is not None:
loss = self._compile_loss(y, y_pred, sample_weight)
if loss is not None:
losses.append(loss)
for loss in self.losses:
losses.append(self._aggregate_additional_loss(loss))
if backend.backend() != "jax" and len(losses) == 0:
raise ValueError(
"No loss to compute. Provide a `loss` argument in `compile()`."
)
if len(losses) == 1:
total_loss = losses[0]
elif len(losses) == 0:
total_loss = ops.zeros(())
else:
total_loss = ops.sum(losses)
return total_loss
def _compute_loss(
self,
x=None,
y=None,
y_pred=None,
sample_weight=None,
training=True,
):
"""Backwards compatibility wrapper for `compute_loss`.
This should be used instead `compute_loss` within `train_step` and
`test_step` to support overrides of `compute_loss` that may not have
the `training` argument, as this argument was added in Keras 3.3.
"""
if self._compute_loss_has_training_arg:
return self.compute_loss(
x, y, y_pred, sample_weight, training=training
)
else:
return self.compute_loss(x, y, y_pred, sample_weight)
def _aggregate_additional_loss(self, loss):
"""Aggregates losses from `add_loss`, regularizers and sublayers.
Args:
loss: A tensor representing the additional loss to aggregate.
Returns:
A tensor representing the summed loss, cast to the `floatx()` if
necessary.
"""
if not backend.is_float_dtype(loss.dtype):
loss = ops.cast(loss, dtype=backend.floatx())
return ops.sum(loss)
def stateless_compute_loss(
self,
trainable_variables,
non_trainable_variables,
metrics_variables,
x=None,
y=None,
y_pred=None,
sample_weight=None,
training=True,
):
var_mapping = list(zip(self.trainable_variables, trainable_variables))
var_mapping.extend(
zip(self.non_trainable_variables, non_trainable_variables)
)
var_mapping.extend(zip(self.metrics_variables, metrics_variables))
with backend.StatelessScope(state_mapping=var_mapping) as scope:
# Note that this is needed for the regularization loss, which need
# the latest value of train/non-trainable variables.
loss = self._compute_loss(
x,
y,
y_pred,
sample_weight=sample_weight,
training=training,
)
# Update non trainable vars (may have been updated in compute_loss)
non_trainable_variables = []
for v in self.non_trainable_variables:
new_v = scope.get_current_value(v)
non_trainable_variables.append(new_v)
# Update metrics vars (may have been updated in compute_loss)
metrics_variables = []
for v in self.metrics_variables:
new_v = scope.get_current_value(v)
metrics_variables.append(new_v)
return loss, (
trainable_variables,
non_trainable_variables,
metrics_variables,
)
def compute_metrics(self, x, y, y_pred, sample_weight=None):
"""Update metric states and collect all metrics to be returned.
Subclasses can optionally override this method to provide custom metric
updating and collection logic. Custom metrics are not passed in
`compile()`, they can be created in `__init__` or `build`. They are
automatically tracked and returned by `self.metrics`.
Example:
```python
class MyModel(Sequential):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.custom_metric = MyMetric(name="custom_metric")
def compute_metrics(self, x, y, y_pred, sample_weight):
# This super call updates metrics from `compile` and returns
# results for all metrics listed in `self.metrics`.
metric_results = super().compute_metrics(
x, y, y_pred, sample_weight)
# `metric_results` contains the previous result for
# `custom_metric`, this is where we update it.
self.custom_metric.update_state(x, y, y_pred, sample_weight)
metric_results['custom_metric'] = self.custom_metric.result()
return metric_results
```
Args:
x: Input data.
y: Target data.
y_pred: Predictions returned by the model output of `model.call(x)`.
sample_weight: Sample weights for weighting the loss function.
Returns:
A `dict` containing values that will be passed to
`keras.callbacks.CallbackList.on_train_batch_end()`. Typically,
the values of the metrics listed in `self.metrics` are returned.
Example: `{'loss': 0.2, 'accuracy': 0.7}`.
"""
del x # The default implementation does not use `x`.
if self._compile_metrics is not None:
self._compile_metrics.update_state(y, y_pred, sample_weight)
return self.get_metrics_result()
def get_metrics_result(self):
"""Returns the model's metrics values as a dict.
If any of the metric result is a dict (containing multiple metrics),
each of them gets added to the top level returned dict of this method.
Returns:
A `dict` containing values of the metrics listed in `self.metrics`.
Example: `{'loss': 0.2, 'accuracy': 0.7}`.
"""
return_metrics = {}
for metric in self.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
return python_utils.pythonify_logs(return_metrics)
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
"""Trains the model for a fixed number of epochs (dataset iterations).
Args:
x: Input data. It can be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A backend-native tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `keras.utils.PyDataset` returning `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A `tf.data.Dataset` yielding `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A `torch.utils.data.DataLoader` yielding `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
- A Python generator function yielding `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`, it can be either NumPy
array(s) or backend-native tensor(s). If `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or a Python generator function,
`y` should not be specified since targets will be obtained from
`x`.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your input data `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function
since they generate batches.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided
(unless the `steps_per_epoch` flag is set to
something other than None).
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
"auto" becomes 1 for most cases.
Note that the progress bar is not
particularly useful when logged to a file,
so `verbose=2` is recommended when not running interactively
(e.g., in a production environment). Defaults to `"auto"`.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `keras.callbacks`. Note
`keras.callbacks.ProgbarLogger` and
`keras.callbacks.History` callbacks are created
automatically and need not be passed to `model.fit()`.
`keras.callbacks.ProgbarLogger` is created
or not based on the `verbose` argument in `model.fit()`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model
metrics on this data at the end of each epoch. The validation
data is selected from the last samples in the `x` and `y` data
provided, before shuffling.
This argument is only supported when `x` and `y` are made of
NumPy arrays or tensors.
If both `validation_data` and `validation_split` are provided,
`validation_data` will override `validation_split`.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. Thus, note the fact
that the validation loss of data provided using
`validation_split` or `validation_data` is not affected by
regularization layers like noise and dropout.
`validation_data` will override `validation_split`.
It can be:
- A tuple `(x_val, y_val)` of NumPy arrays or tensors.
- A tuple `(x_val, y_val, val_sample_weights)` of NumPy
arrays.
- A `keras.utils.PyDataset`, a `tf.data.Dataset`, a
`torch.utils.data.DataLoader` yielding `(inputs, targets)` or a
Python generator function yielding `(x_val, y_val)` or
`(inputs, targets, sample_weights)`.
shuffle: Boolean, whether to shuffle the training data before each
epoch. This argument is ignored when `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class. When `class_weight` is specified
and targets have a rank of 2 or greater, either `y` must be
one-hot encoded, or an explicit final dimension of `1` must
be included for sparse class labels.
sample_weight: Optional NumPy array or tensor of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
NumPy array or tensor with the same length as the input samples
(1:1 mapping between weights and samples), or in the case of
temporal data, you can pass a 2D NumPy array or tensor with
shape `(samples, sequence_length)` to apply a different weight
to every timestep of every sample.
This argument is not supported when `x` is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function.
Instead, provide `sample_weights` as the third element of `x`.
Note that sample weighting does not apply to metrics specified
via the `metrics` argument in `compile()`. To apply sample
weighting to your metrics, you can specify them via the
`weighted_metrics` in `compile()` instead.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples) before declaring one
epoch finished and starting the next epoch. When training with
input tensors or NumPy arrays, the default `None` means that the
value used is the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
If `x` is a `keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function, the
epoch will run until the input dataset is exhausted. When
passing an infinitely repeating dataset, you must specify the
`steps_per_epoch` argument, otherwise the training will run
indefinitely.
validation_steps: Integer or `None`.
Only relevant if `validation_data` is provided.
Total number of steps (batches of samples) to draw before
stopping when performing validation at the end of every epoch.
If `validation_steps` is `None`, validation will run until the
`validation_data` dataset is exhausted. In the case of an
infinitely repeating dataset, it will run indefinitely. If
`validation_steps` is specified and only part of the dataset
is consumed, the evaluation will start from the beginning of the
dataset at each epoch. This ensures that the same validation
samples are used every time.
validation_batch_size: Integer or `None`.
Number of samples per validation batch.
If unspecified, will default to `batch_size`.
Do not specify the `validation_batch_size` if your data is a
`keras.utils.PyDataset`, `tf.data.Dataset`,
`torch.utils.data.DataLoader` or Python generator function
since they generate batches.
validation_freq: Only relevant if validation data is provided.
Specifies how many training epochs to run
before a new validation run is performed,
e.g. `validation_freq=2` runs validation every 2 epochs.
Unpacking behavior for iterator-like inputs:
A common pattern is to pass an iterator like object such as a
`tf.data.Dataset` or a `keras.utils.PyDataset` to `fit()`,
which will in fact yield not only features (`x`)
but optionally targets (`y`) and sample weights (`sample_weight`).
Keras requires that the output of such iterator-likes be
unambiguous. The iterator should return a tuple
of length 1, 2, or 3, where the optional second and third elements
will be used for `y` and `sample_weight` respectively.
Any other type provided will be wrapped in
a length-one tuple, effectively treating everything as `x`. When
yielding dicts, they should still adhere to the top-level tuple
structure,
e.g. `({"x0": x0, "x1": x1}, y)`. Keras will not attempt to separate
features, targets, and weights from the keys of a single dict.
A notable unsupported data type is the `namedtuple`. The reason is
that it behaves like both an ordered datatype (tuple) and a mapping
datatype (dict). So given a namedtuple of the form:
`namedtuple("example_tuple", ["y", "x"])`
it is ambiguous whether to reverse the order of the elements when
interpreting the value. Even worse is a tuple of the form:
`namedtuple("other_tuple", ["x", "y", "z"])`
where it is unclear if the tuple was intended to be unpacked
into `x`, `y`, and `sample_weight` or passed through
as a single element to `x`.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
"""
raise NotImplementedError
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches (see the `batch_size` arg.)
Args:
x: Input data. It can be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/__init__.py | keras/src/trainers/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/trainer_test.py | keras/src/trainers/trainer_test.py | from unittest import mock
import jax
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
from keras.src.backend import config
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.callbacks.callback import Callback
from keras.src.distribution.distribution_lib import DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import py_dataset_adapter
# Resolve the backend-specific Trainer implementation once, at import time,
# so the test classes below can mix it into plain layers. JAX additionally
# re-exports the distribution helpers used by its sharding tests.
if backend.backend() == "jax":
    from keras.src.backend.jax.trainer import JAXTrainer as Trainer
    from keras.src.distribution import DataParallel
    from keras.src.distribution import DeviceMesh
elif backend.backend() == "torch":
    from keras.src.backend.torch.trainer import TorchTrainer as Trainer
elif backend.backend() == "tensorflow":
    from keras.src.backend.tensorflow.trainer import (
        TensorFlowTrainer as Trainer,
    )
elif backend.backend() == "numpy":
    from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer
elif backend.backend() == "openvino":
    from keras.src.backend.openvino.trainer import OpenVINOTrainer as Trainer
else:
    raise ImportError(f"Invalid backend: {backend.backend()}")
# A model is just a layer mixed in with a Trainer.
class ExampleModel(Trainer, layers.Dense):
    """Minimal trainable model: a bias-free Dense layer with a ones kernel.

    With an all-ones kernel, an all-ones input of width 4 maps to an
    output of 4.0 per unit, which makes expected loss/metric values easy
    to derive in the tests below.
    """
    def __init__(self, units):
        layers.Dense.__init__(
            self,
            units=units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
        Trainer.__init__(self)
class CustomTrainTestStepModel(ExampleModel):
    """ExampleModel whose train/test steps inject a constant
    `my_custom_metric` entry (10.0 / 5.0) into the returned logs."""
    def train_step(self, data):
        logs = super().train_step(data)
        logs["my_custom_metric"] = 10.0
        return logs
    def test_step(self, data):
        logs = super().test_step(data)
        logs["my_custom_metric"] = 5.0
        return logs
class JaxCustomTrainTestStepModel(ExampleModel):
    """JAX variant of CustomTrainTestStepModel.

    The JAX trainer uses stateless `(state, data)` step signatures, so the
    overrides thread `state` through while adding the same custom metric.
    """
    def train_step(self, state, data):
        logs, state = super().train_step(state, data)
        logs["my_custom_metric"] = 10.0
        return logs, state
    def test_step(self, state, data):
        logs, state = super().test_step(state, data)
        logs["my_custom_metric"] = 5.0
        return logs, state
class StructModel(Trainer, layers.Layer):
    """Model mapping a dict of two inputs to a dict of two outputs.

    Expects `x` with keys "x_one"/"x_two"; returns "y_one"/"y_two", each
    produced by its own bias-free ones-kernel Dense layer.
    """
    def __init__(self, units):
        layers.Layer.__init__(self)
        Trainer.__init__(self)
        self.dense_1 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
        self.dense_2 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
    def call(self, x):
        return {
            "y_one": self.dense_1(x["x_one"]),
            "y_two": self.dense_2(x["x_two"]),
        }
class ListInputModel(Trainer, layers.Layer):
    """Model taking a list/tuple of two inputs and summing two Dense maps."""
    def __init__(self, units):
        layers.Layer.__init__(self)
        Trainer.__init__(self)
        self.dense_1 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
        self.dense_2 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
    def call(self, x):
        # Guard: the data adapters must deliver the inputs as a list/tuple.
        assert isinstance(x, (list, tuple))
        return self.dense_1(x[0]) + self.dense_2(x[1])
class ListOutputModel(Trainer, layers.Layer):
    """Model producing a list of two outputs from one input."""
    def __init__(self, units):
        layers.Layer.__init__(self)
        Trainer.__init__(self)
        self.dense_1 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
        self.dense_2 = layers.Dense(
            units,
            use_bias=False,
            kernel_initializer=initializers.Ones(),
        )
    def call(self, x):
        return [self.dense_1(x), self.dense_2(x)]
class TrainingTestingLayer(Trainer, layers.Layer):
    """Layer that passes inputs through when `training=True` and zeroes
    them otherwise, so tests can observe which mode flag was used."""
    def __init__(self, **kwargs):
        layers.Layer.__init__(self, **kwargs)
        Trainer.__init__(self)
    def call(self, x, training=False):
        if training:
            return x
        return x * 0
class TestPyDataset(py_dataset_adapter.PyDataset):
    """PyDataset stub yielding constant (ones, zeros) batches on the CPU.

    Args:
        infinite: if True, `num_batches` reports `None`, which the data
            adapters treat as an infinite dataset.
    """

    # Backend name -> device string used to pin batch creation to the CPU.
    # Hoisted to a class attribute so the mapping is not rebuilt on every
    # `__getitem__` call.
    CPU_DEVICES = {
        "tensorflow": "CPU:0",
        "jax": "cpu:0",
        "torch": "cpu",
    }

    def __init__(self, infinite=False, **kwargs):
        super().__init__(**kwargs)
        self.infinite = infinite

    @property
    def num_batches(self):
        # `None` signals "unknown / infinite" to the adapters.
        return None if self.infinite else 20

    def __getitem__(self, idx):
        # Create batches on the CPU regardless of the active accelerator.
        with backend.device(self.CPU_DEVICES[backend.backend()]):
            return ops.ones((5, 4)), ops.zeros((5, 3))
def create_dataset(dataset_type, dataset_kwargs):
    """Build a small dataset of the requested flavor for fit() tests.

    Returns a `(x, y)` pair; `y` is `None` for container types that
    already bundle features and labels (PyDataset, tf.data, DataLoader,
    generators, grain).

    NOTE(review): "grain_datast" [sic] is the identifier used by the
    parameterized test names elsewhere in this file; do not rename it
    here alone.
    """
    if dataset_type == "np_array":
        return np.ones((100, 4)), np.zeros((100, 3))
    elif dataset_type == "native_array":
        return ops.ones((100, 4)), ops.zeros((100, 3))
    elif dataset_type == "py_dataset":
        return TestPyDataset(**dataset_kwargs), None
    elif dataset_type == "tf_dataset":
        import tensorflow as tf
        dataset = tf.data.Dataset.from_tensor_slices(
            (tf.ones((100, 4)), tf.zeros((100, 3)))
        ).batch(5)
        if dataset_kwargs.get("infinite", False):
            dataset = dataset.repeat()
        return dataset, None
    elif dataset_type == "torch_dataloader":
        import torch
        # Iterable torch dataset, optionally advertising a __len__.
        class TestIterableDataset(torch.utils.data.IterableDataset):
            def __iter__(self):
                for _ in range(20):
                    yield torch.ones((5, 4)), torch.zeros((5, 3))
        class TestIterableDatasetWithLen(TestIterableDataset):
            def __len__(self):
                return 20
        if dataset_kwargs.get("iterable", False):
            if dataset_kwargs.get("has_len", False):
                dataset = TestIterableDatasetWithLen()
            else:
                dataset = TestIterableDataset()
            return torch.utils.data.DataLoader(dataset), None
        else:
            dataset = torch.utils.data.TensorDataset(
                torch.ones((100, 4)), torch.zeros((100, 3))
            )
            return torch.utils.data.DataLoader(dataset, batch_size=5), None
    elif dataset_type == "generator":
        def generate_finite():
            for _ in range(20):
                yield ops.ones((5, 4)), ops.zeros((5, 3))
        def generate_infinite():
            while True:
                yield ops.ones((5, 4)), ops.zeros((5, 3))
        if dataset_kwargs.get("infinite", False):
            return generate_infinite(), None
        else:
            return generate_finite(), None
    elif dataset_type == "grain_datast":
        import grain
        class TestIterableDataset(grain.sources.RandomAccessDataSource):
            def __init__(self):
                super().__init__()
                self.x = np.ones((100, 4)).astype("float32")
                self.y = np.zeros((100, 3)).astype("float32")
            def __len__(self):
                return len(self.x)
            def __getitem__(self, idx):
                return self.x[idx], self.y[idx]
        if dataset_kwargs.get("use_dataloader", False):
            source = TestIterableDataset()
            dataloader = grain.DataLoader(
                data_source=source,
                sampler=grain.samplers.IndexSampler(len(source), num_epochs=1),
                operations=[grain.transforms.Batch(batch_size=5)],
            )
            return dataloader, None
        else:
            dataset = grain.MapDataset.source(TestIterableDataset())
            if dataset_kwargs.get("has_len", False):
                dataset = dataset.to_iter_dataset()
            dataset = dataset.batch(5)
            return dataset, None
    else:
        raise ValueError(f"Invalid dataset type {dataset_type}")
def sparse_generator(generator_type):
    """Yield four (features, labels) batches with sparse features.

    `generator_type` selects the framework producing the sparse inputs:
    "scipy" (scipy.sparse matrices), "tf" (tf.SparseTensor) or "jax"
    (BCOO arrays). Features are (2, 4); labels are dense (2, 3).
    """
    if generator_type == "scipy":
        import scipy

        for _ in range(4):
            features = scipy.sparse.random(
                2, 4, density=0.25, dtype="float32"
            )
            labels = np.random.rand(2, 3).astype("float32")
            yield features, labels
    elif generator_type == "tf":
        import tensorflow as tf

        for _ in range(4):
            dense = tf.random.uniform((2, 4), dtype="float32")
            # Dropout zeroes ~25% of entries before sparsifying.
            features = tf.sparse.from_dense(tf.nn.dropout(dense, 0.25))
            labels = tf.random.uniform((2, 3), dtype="float32")
            yield features, labels
    elif generator_type == "jax":
        import jax
        import jax.experimental.sparse as jax_sparse

        for _ in range(4):
            seed = jax.random.PRNGKey(0)
            features = jax_sparse.random_bcoo(
                seed, (2, 4), dtype="float32", nse=0.25
            )
            labels = jax.random.uniform(seed, (2, 3), dtype="float32")
            yield features, labels
    else:
        raise ValueError(f"Invalid generator type {generator_type}")
class EpochAgnosticMeanSquaredError(metrics.MeanSquaredError):
    """MSE metric whose state accumulates across epoch boundaries.

    `reset_state` is disabled so per-batch values remain comparable
    across epochs.
    """
    def __init__(self):
        super().__init__(name="mse")
        # Perform the single real reset here, since reset_state below is
        # a no-op.
        super().reset_state()
    def reset_state(self):
        # prevent reset at each starting epoch
        pass
class StepObserver(Callback):
    """Callback recording batch/epoch counts and per-batch "mse" values."""
    def __init__(self):
        super().__init__()
        self.begin_count = 0
        self.end_count = 0
        self.epoch_begin_count = 0
        self.epoch_end_count = 0
        self.batch_loss_history = []
    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_begin_count += 1
    def on_epoch_end(self, epoch, logs=None):
        self.epoch_end_count += 1
    def on_batch_begin(self, batch, logs=None):
        self.begin_count += 1
    def on_batch_end(self, batch, logs=None):
        self.end_count += 1
        # Assumes the model tracks an "mse" metric (e.g.
        # EpochAgnosticMeanSquaredError); KeyError otherwise.
        self.batch_loss_history.append(logs["mse"])
class StepCount(Callback):
    """Callback asserting that reported batch indices advance by
    `steps_per_execution` within each epoch."""
    def __init__(self, steps_per_execution=1):
        super().__init__()
        self.begin_count = 0
        self.end_count = 0
        self.epoch_begin_count = 0
        self.epoch_end_count = 0
        self.steps_per_execution = steps_per_execution
    def on_epoch_begin(self, epoch, logs=None):
        # Per-epoch counters restart; epoch counters accumulate.
        self.begin_count = 0
        self.end_count = 0
        self.epoch_begin_count += 1
    def on_epoch_end(self, epoch, logs=None):
        self.epoch_end_count += 1
    def on_batch_begin(self, batch, logs=None):
        # Batch index at begin is a multiple of steps_per_execution.
        assert batch == self.begin_count * self.steps_per_execution
        self.begin_count += 1
    def on_batch_end(self, batch, logs=None):
        self.end_count += 1
        # At end, the index is the last step of the executed span.
        assert batch == self.end_count * self.steps_per_execution - 1
class TestTrainer(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_metric_tracking(self):
        """Model-owned, compiled and loss metrics are all tracked."""
        class ModelWithMetric(Trainer, layers.Dense):
            def __init__(self, units):
                layers.Dense.__init__(
                    self,
                    units=units,
                    use_bias=False,
                    kernel_initializer=initializers.Ones(),
                )
                Trainer.__init__(self)
                self.my_metric = metrics.MeanSquaredError(name="my_metric")
        model = ModelWithMetric(units=3)
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        x = np.ones((2, 4))
        y = np.zeros((2, 3))
        # Fit the model to make sure compile_metrics are built
        model.fit(x, y, batch_size=2, epochs=1)
        # The model should have 3 metrics: loss_tracker, compile_metrics,
        # my_metric.
        self.assertEqual(len(model.metrics), 3)
        self.assertEqual(model.metrics[0], model._loss_tracker)
        self.assertEqual(model.metrics[1], model._compile_metrics)
        self.assertEqual(model.metrics[2], model.my_metric)
        # All metrics should have their weights created
        self.assertEqual(len(model._loss_tracker.variables), 2)
        self.assertEqual(len(model._compile_metrics.variables), 2)
        self.assertEqual(len(model.my_metric.variables), 2)
        # And those weights are tracked at the model level
        self.assertEqual(len(model.metrics_variables), 6)
        self.assertLen(model.non_trainable_variables, 0)
        # Models with only weighted_metrics should have the same 3 metrics
        model_weighted = ModelWithMetric(units=3)
        model_weighted.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            weighted_metrics=[metrics.MeanSquaredError()],
        )
        model_weighted.fit(
            x,
            y,
            batch_size=2,
            epochs=1,
            sample_weight=np.ones(2),
        )
        self.assertEqual(len(model_weighted.metrics), 3)
    def test_nested_trainer_metrics(self):
        """Metrics of a compiled nested model stay separate from the
        outer model's metrics."""
        # https://github.com/keras-team/keras/issues/20188
        model = ExampleModel(units=3)
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        self.assertLen(model.metrics, 2)
        self.assertEqual(model.metrics[0], model._loss_tracker)
        self.assertEqual(model.metrics[1], model._compile_metrics)
        inputs = keras.Input((4,))
        outputs = model(inputs)
        outputs = layers.Dense(8)(outputs)
        new_model = models.Model(inputs, outputs)
        new_model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        self.assertLen(new_model.metrics, 2)
        self.assertEqual(new_model.metrics[0], new_model._loss_tracker)
        self.assertEqual(new_model.metrics[1], new_model._compile_metrics)
    def test_nested_trainer_metrics_without_compile(self):
        """An uncompiled inner model contributes no metrics to the outer
        compiled model."""
        model = ExampleModel(units=3)
        self.assertLen(model.metrics, 0)
        inputs = keras.Input((4,))
        outputs = model(inputs)
        outputs = layers.Dense(8)(outputs)
        new_model = models.Model(inputs, outputs)
        new_model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        self.assertLen(new_model.metrics, 2)
        self.assertEqual(new_model.metrics[0], new_model._loss_tracker)
        self.assertEqual(new_model.metrics[1], new_model._compile_metrics)
    def test_multiple_compiles(self):
        """Compiling a model and then a combined model keeps each model's
        metric lists independent."""
        # https://github.com/keras-team/keras/issues/20474
        model1 = ExampleModel(units=3)
        model2 = ExampleModel(units=3)
        model1.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        # Combine these 2 models into `combined`.
        inputs = keras.Input(shape=(4,))
        x = model1(inputs)
        outputs = model2(x)
        combined = models.Model(inputs, outputs)
        combined.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        self.assertLen(model1.metrics, 2)
        self.assertIsNotNone(model1._loss_tracker)
        self.assertEqual(model1.metrics[0], model1._loss_tracker)
        self.assertEqual(model1.metrics[1], model1._compile_metrics)
        # `combined.metrics` will not include `model1.metrics`.
        self.assertLen(combined.metrics, 2)
        self.assertIsNotNone(combined._loss_tracker)
        self.assertEqual(combined.metrics[0], combined._loss_tracker)
        self.assertEqual(combined.metrics[1], combined._compile_metrics)
    @pytest.mark.skipif(
        backend.backend() != "torch",
        reason="torch backend runs in eager mode for jit_compile='auto'",
    )
    def test_compile_eager_vs_jit_torch(self):
        """jit_compile='auto' resolves to False on the torch backend."""
        model = ExampleModel(units=3)
        model.compile(jit_compile="auto")
        # torch trainer en/disables torch.compile only based on the value of
        # model.jit_compile (not model.run_eagerly)
        self.assertFalse(model.run_eagerly)
        self.assertFalse(model.jit_compile)
@parameterized.named_parameters(
[
("eager", True, False, False),
("graph_fn", False, False, False),
("jit", False, True, False),
("steps_per_epoch_eager", True, False, True),
("steps_per_epoch_graph_fn", False, False, True),
("steps_per_epoch_jit", False, True, True),
]
)
@pytest.mark.requires_trainable_backend
def test_fit_flow(self, run_eagerly, jit_compile, use_steps_per_epoch):
if not run_eagerly and not jit_compile and use_steps_per_epoch:
if False and backend.backend() == "tensorflow":
self.skipTest(
"TODO: Graph mode without XLA in TF backend leads to "
"unexpected logs, need further checks."
)
if jit_compile and backend.backend() == "torch":
self.skipTest(
"TODO: compilation with torch backend leads to "
"unexpected logs, need further checks."
)
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
history = model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
epochs=epochs,
)
history = history.history
self.assertIn("loss", history)
self.assertIn("mean_squared_error", history)
self.assertAllClose(
history["mean_squared_error"],
[14.5, 11.5, 8.5],
atol=1.0, # TODO: results vary across backends
)
@parameterized.named_parameters(
[
{
"testcase_name": "np_array",
"dataset_type": "np_array",
"fit_kwargs": {"batch_size": 5},
},
{
"testcase_name": "native_array",
"dataset_type": "native_array",
"fit_kwargs": {"batch_size": 5},
},
{
"testcase_name": "py_dataset",
"dataset_type": "py_dataset",
},
{
"testcase_name": "py_dataset_cw",
"dataset_type": "py_dataset",
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "py_dataset_infinite_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {
"steps_per_epoch": 20,
"class_weight": {0: 1, 1: 2},
},
},
{
"testcase_name": "py_dataset_multithreading",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2},
},
{
"testcase_name": "py_dataset_multithreading_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2},
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_multithreading_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True, "workers": 2},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "py_dataset_multiprocessing",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2, "use_multiprocessing": True},
},
{
"testcase_name": "py_dataset_multiprocessing_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2, "use_multiprocessing": True},
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_multiprocessing_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {
"infinite": True,
"workers": 2,
"use_multiprocessing": True,
},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "tf_dataset",
"dataset_type": "tf_dataset",
},
{
"testcase_name": "tf_dataset_infinite",
"dataset_type": "tf_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "torch_dataloader_tensor",
"dataset_type": "torch_dataloader",
},
{
"testcase_name": "torch_dataloader_iterable",
"dataset_type": "torch_dataloader",
"dataset_kwargs": {"iterable": True, "has_len": False},
},
{
"testcase_name": "torch_dataloader_iterable_with_len",
"dataset_type": "torch_dataloader",
"dataset_kwargs": {"iterable": True, "has_len": True},
},
{
"testcase_name": "generator",
"dataset_type": "generator",
},
{
"testcase_name": "generator_infinite",
"dataset_type": "generator",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "grain_datast",
"dataset_type": "grain_datast",
"dataset_kwargs": {"has_len": False},
},
{
"testcase_name": "grain_datast_with_len",
"dataset_type": "grain_datast",
"dataset_kwargs": {"has_len": True},
},
{
"testcase_name": "grain_dataloader",
"dataset_type": "grain_datast",
"dataset_kwargs": {"use_dataloader": True},
},
]
)
@pytest.mark.requires_trainable_backend
def test_fit_with_data_adapter(
self, dataset_type, dataset_kwargs={}, fit_kwargs={}
):
jit_compile = True
if (
dataset_kwargs.get("use_multiprocessing", False)
and backend.backend() == "jax"
):
pytest.skip("Multiprocessing not supported with JAX backend")
if dataset_type == "grain_datast" and backend.backend() == "torch":
# Grain datasets are not supported with torch + jit_compile.
jit_compile = False
model = ExampleModel(units=3)
optimizer = optimizers.Adagrad()
model.compile(
optimizer=optimizer,
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
jit_compile=jit_compile,
)
x, y = create_dataset(dataset_type, dataset_kwargs)
model.fit(x, y, epochs=3, **fit_kwargs)
    @parameterized.named_parameters(
        [
            ("eager", True, False, False),
            ("graph_fn", False, False, False),
            ("jit", False, True, False),
            ("steps_per_epoch_eager", True, False, True),
            ("steps_per_epoch_graph_fn", False, False, True),
            ("steps_per_epoch_jit", False, True, True),
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_fit_with_val_split(
        self, run_eagerly, jit_compile, use_steps_per_epoch
    ):
        """fit() with validation_split works for NumPy and backend-native
        inputs across eager/graph/jit modes."""
        if not run_eagerly and not jit_compile and use_steps_per_epoch:
            if backend.backend() == "tensorflow":
                self.skipTest(
                    "TODO: Graph mode without XLA in TF backend leads to "
                    "unexpected logs, need further checks."
                )
        model = ExampleModel(units=3)
        epochs = 3
        batch_size = 20
        steps_per_epoch = 7
        dataset_size = batch_size * (steps_per_epoch - 2)
        x = np.ones((dataset_size, 4))
        y = np.zeros((dataset_size, 3))
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            run_eagerly=run_eagerly,
            jit_compile=jit_compile,
        )
        history = model.fit(
            x,
            y,
            batch_size=batch_size,
            steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
            epochs=epochs,
            validation_split=0.2,
        )
        history = history.history
        self.assertIn("loss", history)
        self.assertIn("val_loss", history)
        # Test with backend-native tensors.
        x = ops.ones((dataset_size, 4))
        y = ops.zeros((dataset_size, 3))
        history = model.fit(
            x,
            y,
            batch_size=batch_size,
            steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
            epochs=epochs,
            validation_split=0.2,
        )
        history = history.history
        self.assertIn("loss", history)
        self.assertIn("val_loss", history)
    @pytest.mark.requires_trainable_backend
    def test_fit_with_custom_train_step(self):
        """A custom train_step's extra log entry shows up in history."""
        # JAX uses the stateless step signature, hence a separate model.
        if backend.backend() == "jax":
            model = JaxCustomTrainTestStepModel(units=3)
        else:
            model = CustomTrainTestStepModel(units=3)
        x = np.ones((100, 4))
        y = np.zeros((100, 3))
        batch_size = 16
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        history = model.fit(x, y, batch_size=batch_size)
        history = history.history
        self.assertIn("loss", history)
        self.assertIn("mean_squared_error", history)
        self.assertAllClose(history["my_custom_metric"], 10.0)
    @parameterized.named_parameters(
        named_product(
            generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
        )
    )
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_fit_sparse(self, generator_type, mode):
        """fit() on sparse inputs keeps gradient updates sparse."""
        model = ExampleModel(units=3)
        optimizer = optimizers.Adagrad()
        model.compile(
            optimizer=optimizer,
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            run_eagerly=(mode == "eager"),
            jit_compile=False,
        )
        dataset = sparse_generator(generator_type)
        sparse_variable_updates = False
        # Spy on optimizer updates: an IndexedSlices value means the
        # gradient stayed sparse all the way to the variable update.
        def mock_optimizer_assign(variable, value):
            nonlocal sparse_variable_updates
            if value.__class__.__name__ == "IndexedSlices":
                sparse_variable_updates = True
        with mock.patch.object(
            optimizer, "assign_sub", autospec=True
        ) as optimizer_assign_sub:
            optimizer_assign_sub.side_effect = mock_optimizer_assign
            model.fit(dataset)
        # JAX does not produce sparse gradients the way we use it.
        if backend.backend() != "jax":
            # Verify tensors did not get densified along the way.
            self.assertTrue(sparse_variable_updates)
    @parameterized.named_parameters(
        [
            ("eager", True, False),
            ("graph_fn", False, False),
            ("jit", False, True),
        ]
    )
    def test_evaluate_flow(self, run_eagerly, jit_compile):
        """evaluate() returns exact loss/metric values in list and dict
        forms.

        With a ones kernel, input ones of width 4 and zero targets, each
        prediction is 4.0, so loss and MSE are both 16.0.
        """
        model = ExampleModel(units=3)
        x = np.ones((100, 4))
        y = np.zeros((100, 3))
        batch_size = 16
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            run_eagerly=run_eagerly,
            jit_compile=jit_compile,
        )
        output = model.evaluate(x, y, batch_size=batch_size)
        self.assertAllClose(output, [16.0, 16.0])
        output = model.evaluate(x, y, batch_size=batch_size, return_dict=True)
        self.assertIsInstance(output, dict)
        self.assertIn("loss", output)
        self.assertIn("mean_squared_error", output)
        self.assertAllClose(output["mean_squared_error"], 16.0)
    @parameterized.named_parameters([("flat", False), ("dict", True)])
    @pytest.mark.requires_trainable_backend
    def test_evaluate_with_custom_test_step(self, return_dict):
        """A custom test_step's extra log entry appears in evaluate()
        output, in both flat and dict forms."""
        if backend.backend() == "jax":
            model = JaxCustomTrainTestStepModel(units=3)
        else:
            model = CustomTrainTestStepModel(units=3)
        x = np.ones((100, 4))
        y = np.zeros((100, 3))
        batch_size = 16
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
        )
        output = model.evaluate(
            x, y, batch_size=batch_size, return_dict=return_dict
        )
        self.assertLen(output, 3)
        if return_dict:
            self.assertAllClose(output["my_custom_metric"], 5.0)
        else:
            self.assertAllClose(output[-1], 5.0)  # Custom metrics go last.
    @parameterized.named_parameters(
        named_product(
            generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
        )
    )
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_evaluate_sparse(self, generator_type, mode):
        """Smoke test: evaluate() accepts sparse batches without error."""
        model = ExampleModel(units=3)
        model.compile(
            optimizer=optimizers.Adagrad(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            run_eagerly=(mode == "eager"),
            jit_compile=False,
        )
        dataset = sparse_generator(generator_type)
        model.evaluate(dataset)
    @parameterized.named_parameters(
        [
            ("eager", True, False),
            ("graph_fn", False, False),
            ("jit", False, True),
        ]
    )
    def test_predict_flow(self, run_eagerly, jit_compile):
        # Test basic example
        # Ones kernel on width-4 ones input -> every output equals 4.0.
        model = ExampleModel(units=3)
        model.run_eagerly = run_eagerly
        model.jit_compile = jit_compile
        x = np.ones((100, 4))
        batch_size = 16
        outputs = model.predict(x, batch_size=batch_size)
        self.assertAllClose(outputs, 4 * np.ones((100, 3)))
    @parameterized.named_parameters(
        [
            ("eager", True, False),
            ("graph_fn", False, False),
            ("jit", False, True),
        ]
    )
    def test_predict_flow_struct(self, run_eagerly, jit_compile):
        # Test with input/output structs
        # StructModel maps {"x_one", "x_two"} -> {"y_one", "y_two"}; each
        # branch is a ones-kernel Dense, so every output equals 4.0.
        model = StructModel(units=3)
        model.run_eagerly = run_eagerly
        model.jit_compile = jit_compile
        x = {
            "x_one": np.ones((100, 4)),
            "x_two": np.ones((100, 4)),
        }
        batch_size = 16
        outputs = model.predict(x, batch_size=batch_size)
        self.assertIsInstance(outputs, dict)
        self.assertEqual(len(outputs), 2)
        self.assertAllClose(outputs["y_one"], 4 * np.ones((100, 3)))
        self.assertAllClose(outputs["y_two"], 4 * np.ones((100, 3)))
@parameterized.named_parameters(
named_product(
generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/generator_data_adapter.py | keras/src/trainers/data_adapters/generator_data_adapter.py | import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
    """Adapter for Python generators.

    The generator must yield tuples of length 1, 2 or 3:
    `(inputs,)`, `(inputs, targets)` or
    `(inputs, targets, sample_weights)`.
    """

    def __init__(self, generator):
        first_batches, generator = peek_and_restore(generator)
        self.generator = generator
        self._first_batches = first_batches
        self._output_signature = None
        # Fail with a clear message instead of an IndexError below when
        # the generator yields nothing at all.
        if not first_batches:
            raise ValueError(
                "When passing a Python generator to a Keras model, the "
                "generator must yield at least one batch. Received an "
                "empty generator."
            )
        if not isinstance(first_batches[0], tuple):
            raise ValueError(
                "When passing a Python generator to a Keras model, "
                "the generator must return a tuple, either "
                "(input,) or (inputs, targets) or "
                "(inputs, targets, sample_weights). "
                f"Received: {first_batches[0]}"
            )

    def get_numpy_iterator(self):
        return data_adapter_utils.get_numpy_iterator(self.generator())

    def get_jax_iterator(self):
        return data_adapter_utils.get_jax_iterator(self.generator())

    def get_tf_dataset(self):
        from keras.src.utils.module_utils import tensorflow as tf

        def convert_to_tf(x, spec):
            # `None` components (e.g. missing sample weights) become empty
            # optionals for tf.data.
            if x is None:
                return tf.experimental.Optional.empty(None)
            if data_adapter_utils.is_scipy_sparse(x):
                x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
            elif data_adapter_utils.is_jax_sparse(x):
                x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
            if not spec.shape.is_compatible_with(x.shape):
                raise TypeError(
                    f"Generator yielded an element of shape {x.shape} where "
                    f"an element of shape {spec.shape} was expected. Your "
                    "generator provides tensors with variable input "
                    "dimensions other than the batch size. Make sure that the "
                    "generator's first two batches do not have the same "
                    "dimension value wherever there is a variable input "
                    "dimension."
                )
            return x

        def get_tf_iterator():
            for batch in self.generator():
                batch = tree.map_structure(
                    convert_to_tf, batch, self._output_signature
                )
                yield batch

        # The output signature is inferred lazily from the peeked batches
        # and cached for subsequent calls.
        if self._output_signature is None:
            self._output_signature = data_adapter_utils.get_tensor_spec(
                self._first_batches
            )
        ds = tf.data.Dataset.from_generator(
            get_tf_iterator,
            output_signature=self._output_signature,
        )
        ds = ds.prefetch(tf.data.AUTOTUNE)
        return ds

    def get_torch_dataloader(self):
        return data_adapter_utils.get_torch_dataloader(self.generator())

    @property
    def num_batches(self):
        # Unknown ahead of time for a generator.
        return None

    @property
    def batch_size(self):
        # Unknown ahead of time for a generator.
        return None
def peek_and_restore(generator):
    """Materialize the first few batches of `generator` for inspection.

    Returns the peeked batches and a zero-argument callable that replays
    the complete stream (peeked batches first, then the untouched rest of
    the generator).
    """
    num_to_peek = data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
    peeked = [batch for batch in itertools.islice(generator, num_to_peek)]

    def replay():
        return itertools.chain(peeked, generator)

    return peeked, replay
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/array_slicing.py | keras/src/trainers/data_adapters/array_slicing.py | import collections
import math
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.utils.module_utils import tensorflow as tf
try:
import pandas
except ImportError:
pandas = None
# Leave jax, tf, and torch arrays off this list. Instead we will use
# `__array__` to detect these types. Doing so allows us to avoid importing a
# backend framework we are not currently using just to do type-checking.
ARRAY_TYPES = (np.ndarray,)
if pandas:
ARRAY_TYPES = ARRAY_TYPES + (pandas.Series, pandas.DataFrame)
class Sliceable:
    """Wrapper exposing first-axis slicing over a native array or tensor.

    Subscripting a `Sliceable` selects rows (dimension 0) of the wrapped
    array. Subclasses override the conversion class methods to adapt data
    for each backend.

    Args:
        array: the native array or tensor to wrap.

    Attributes:
        shape: the shape of the full dense native array.
    """

    def __init__(self, array):
        self.array = array

    def __getitem__(self, indices):
        """Return the rows of the wrapped array selected by `indices`.

        Args:
            indices: selection along dimension 0 only; supports a `slice`
                or a list, tuple, `np.array` or 1D tensor of indices.

        Returns: A slice of `self.array`.
        """
        return self.array[indices]

    @classmethod
    def cast(cls, x, dtype):
        """Return `x` cast to `dtype`.

        Only invoked on a full array as provided by the user.

        Args:
            x: the tensor to cast.

        Returns: the cast tensor.
        """
        return x.astype(dtype)

    @classmethod
    def convert_to_numpy(cls, x):
        """Return `x` as a NumPy array.

        Only invoked after slicing via `__getitem__`.

        Args:
            x: the tensor to convert.

        Returns: the converted tensor.
        """
        return x

    @classmethod
    def convert_to_tf_dataset_compatible(cls, x):
        """Return `x` in a form `tf.data.Dataset.from_tensors` accepts.

        This may be a NumPy array, `tf.Tensor` or any other consumable
        tensor type. Only invoked on a full array as provided by the user.

        Args:
            x: the tensor to convert.

        Returns: converted version tensor.
        """
        return x

    @classmethod
    def convert_to_jax_compatible(cls, x):
        """Return `x` in a form the JAX backend can consume.

        This may be a JAX array, `JAXSparse` or a NumPy array. Invoked
        after slicing; used to convert sparse tensors and densify ragged
        tensors.

        Args:
            x: the tensor to convert.

        Returns: the converted tensor.
        """
        return x

    @classmethod
    def convert_to_torch_compatible(cls, x):
        """Return `x` in a form the Torch backend can consume.

        This may be a Torch tensor, NumPy array or anything accepted by
        `keras.backend.torch.core.convert_to_tensor()`. Invoked after
        slicing; used to densify sparse and ragged tensors.

        Args:
            x: the tensor to convert.

        Returns: the converted tensor.
        """
        return x
class NumpySliceable(Sliceable):
    # NumPy arrays natively support slices and fancy indexing on the first
    # axis and need no conversion, so the base class behavior suffices.
    pass
class TensorflowSliceable(Sliceable):
    """`Sliceable` for dense TensorFlow tensors."""
    def __getitem__(self, indices):
        from keras.src.utils.module_utils import tensorflow as tf
        if isinstance(indices, slice):
            return self.array[indices]
        else:
            # Index lists/arrays are handled with gather; tf tensors do
            # not support NumPy-style fancy indexing directly.
            return tf.gather(self.array, indices, axis=0)
    @classmethod
    def cast(cls, x, dtype):
        from keras.src.backend.tensorflow.core import cast
        return cast(x, dtype)
    @classmethod
    def convert_to_numpy(cls, x):
        from keras.src.backend.tensorflow.core import convert_to_numpy
        return convert_to_numpy(x)
class TensorflowRaggedSliceable(TensorflowSliceable):
@classmethod
def convert_to_jax_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_torch_compatible(cls, x):
return x.to_tensor()
class TensorflowSparseSliceable(TensorflowSliceable):
def __init__(self, array):
super().__init__(to_tensorflow_sparse_wrapper(array))
@property
def shape(self):
return self.array.sparse.shape
def __getitem__(self, indices):
return slice_tensorflow_sparse_wrapper(self.array, indices)
@classmethod
def convert_to_tf_dataset_compatible(cls, x):
return to_tensorflow_sparse_wrapper(x)
@classmethod
def convert_to_jax_compatible(cls, x):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
@classmethod
def convert_to_torch_compatible(cls, x):
from keras.src.backend.tensorflow import sparse as tf_sparse
return tf_sparse.sparse_to_dense(x)
class JaxSparseSliceable(Sliceable):
def __getitem__(self, indices):
return self.array[indices, ...]
@classmethod
def convert_to_numpy(cls, x):
from keras.src.backend.jax.core import convert_to_numpy
return convert_to_numpy(x)
@classmethod
def convert_to_tf_dataset_compatible(cls, array):
return to_tensorflow_sparse_wrapper(
data_adapter_utils.jax_sparse_to_tf_sparse(array)
)
@classmethod
def convert_to_torch_compatible(cls, x):
return x.todense()
class TorchSliceable(Sliceable):
@classmethod
def cast(cls, x, dtype):
from keras.src.backend.torch.core import cast
return cast(x, dtype)
@classmethod
def convert_to_numpy(cls, x):
from keras.src.backend.torch.core import convert_to_numpy
return convert_to_numpy(x)
class PandasSliceable(Sliceable):
def __getitem__(self, indices):
return self.array.iloc[indices]
@classmethod
def convert_to_numpy(cls, x):
return x.to_numpy()
@classmethod
def convert_to_tf_dataset_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_jax_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_torch_compatible(cls, x):
return cls.convert_to_numpy(x)
class PandasDataFrameSliceable(PandasSliceable):
pass
class PandasSeriesSliceable(PandasSliceable):
@classmethod
def convert_to_numpy(cls, x):
return np.expand_dims(x.to_numpy(), axis=-1)
class ScipySparseSliceable(Sliceable):
def __init__(self, array):
# The COO representation is not indexable / sliceable and does not lend
# itself to it. Use the CSR representation instead, which is sliceable.
super().__init__(array.tocsr())
@classmethod
def convert_to_numpy(cls, x):
return x.todense()
@classmethod
def convert_to_tf_dataset_compatible(cls, x):
return to_tensorflow_sparse_wrapper(
data_adapter_utils.scipy_sparse_to_tf_sparse(x)
)
@classmethod
def convert_to_jax_compatible(cls, x):
return data_adapter_utils.scipy_sparse_to_jax_sparse(x)
@classmethod
def convert_to_torch_compatible(cls, x):
return x.todense()
# `tf.SparseTensor` does not support indexing or `tf.gather`. The COO
# representation it uses does not lend itself to indexing. We add some
# intermediary tensors to ease the indexing and slicing. We put both indices and
# values in `RaggedTensor`s where each row corresponds to a row in the sparse
# tensor. This is because the number of values per row is not fixed.
# `RaggedTensor`s do support indexing and `tf.gather`, although on CPU only.
# We then reconstruct a `SparseTensor` from extracted rows. In theory, there is
# no duplication of data for the indices and values, only the addition of row
# splits for the ragged representation.
# `TensorflowSparseWrapper` is a named tuple which combines the original
# `SparseTensor` (used for the shape) and the ragged representations of indices
# and values for indexing / slicing. We use a named tuple and not a `Sliceable`
# to be able to ingest it in `tf.data.Dataset.from_tensors()` and map it.
TensorflowSparseWrapper = collections.namedtuple(
"TensorflowSparseWrapper", ["sparse", "ragged_indices", "ragged_values"]
)
def to_tensorflow_sparse_wrapper(sparse):
from keras.src.utils.module_utils import tensorflow as tf
row_ids = sparse.indices[:, 0]
row_splits = tf.experimental.RowPartition.from_value_rowids(
row_ids
).row_splits()
ragged_indices = tf.cast(
tf.RaggedTensor.from_row_splits(sparse.indices, row_splits), tf.int64
)
ragged_values = tf.RaggedTensor.from_row_splits(sparse.values, row_splits)
return TensorflowSparseWrapper(sparse, ragged_indices, ragged_values)
def slice_tensorflow_sparse_wrapper(sparse_wrapper, indices):
from keras.src.utils.module_utils import tensorflow as tf
if isinstance(indices, slice):
sparse_indices = sparse_wrapper.ragged_indices[indices]
sparse_values = sparse_wrapper.ragged_values[indices]
batch_dim = indices.stop - indices.start
else:
sparse_indices = tf.gather(sparse_wrapper.ragged_indices, indices)
sparse_values = tf.gather(sparse_wrapper.ragged_values, indices)
if isinstance(indices, list):
batch_dim = len(indices)
else:
batch_dim = indices.shape[0]
if batch_dim is None:
batch_dim = tf.shape(indices)[0]
row_ids = sparse_indices.value_rowids()
sparse_indices = sparse_indices.flat_values[:, 1:] # remove first value
sparse_indices = tf.concat(
[tf.expand_dims(row_ids, -1), sparse_indices], axis=1
)
sparse_values = sparse_values.flat_values
sparse_shape = (batch_dim,) + tuple(
sparse_wrapper.sparse.shape.as_list()[1:]
)
return tf.SparseTensor(sparse_indices, sparse_values, sparse_shape)
def can_slice_array(x):
return (
x is None
or isinstance(x, ARRAY_TYPES)
or data_adapter_utils.is_tensorflow_tensor(x)
or data_adapter_utils.is_jax_array(x)
or data_adapter_utils.is_torch_tensor(x)
or data_adapter_utils.is_scipy_sparse(x)
or hasattr(x, "__array__")
)
def convert_to_sliceable(arrays, target_backend=None):
"""Convert a structure of arrays into `Sliceable` instances
Args:
arrays: the arrays to convert.
target_backend: the target backend for the output:
- `None` indicates that `arrays` will be wrapped into `Sliceable`s
as-is without using a different representation. This is used by
`train_validation_split()`.
- `tensorflow` indicates that
`Sliceable.convert_to_tf_dataset_compatible` will be called. The
returned structure therefore contains arrays, not `Sliceable`s.
- `numpy`, `jax` or `torch` indices that the arrays will eventually
be converted to this backend type after slicing. In this case,
the intermediary `Sliceable`s may use a different representation
from the input `arrays` for better performance.
Returns: the same structure with `Sliceable` instances or arrays.
"""
def convert_single_array(x):
if x is None:
return x
# Special case: handle np "object" arrays containing strings
if (
isinstance(x, np.ndarray)
and str(x.dtype) == "object"
and backend.backend() == "tensorflow"
and all(isinstance(e, str) for e in x)
):
x = tf.convert_to_tensor(x, dtype="string")
# Step 1. Determine which Sliceable class to use.
if isinstance(x, np.ndarray):
sliceable_class = NumpySliceable
elif data_adapter_utils.is_tensorflow_tensor(x):
if data_adapter_utils.is_tensorflow_ragged(x):
sliceable_class = TensorflowRaggedSliceable
elif data_adapter_utils.is_tensorflow_sparse(x):
sliceable_class = TensorflowSparseSliceable
else:
sliceable_class = TensorflowSliceable
elif data_adapter_utils.is_jax_array(x):
if data_adapter_utils.is_jax_sparse(x):
sliceable_class = JaxSparseSliceable
else:
x = np.asarray(x)
sliceable_class = NumpySliceable
elif data_adapter_utils.is_torch_tensor(x):
sliceable_class = TorchSliceable
elif pandas is not None and isinstance(x, pandas.DataFrame):
sliceable_class = PandasDataFrameSliceable
elif pandas is not None and isinstance(x, pandas.Series):
sliceable_class = PandasSeriesSliceable
elif data_adapter_utils.is_scipy_sparse(x):
sliceable_class = ScipySparseSliceable
elif hasattr(x, "__array__"):
x = np.asarray(x)
sliceable_class = NumpySliceable
else:
raise ValueError(
"Expected a NumPy array, tf.Tensor, tf.RaggedTensor, "
"tf.SparseTensor, jax.np.ndarray, "
"jax.experimental.sparse.JAXSparse, torch.Tensor, "
"Pandas Dataframe, or Pandas Series. Received invalid input: "
f"{x} (of type {type(x)})"
)
# Step 2. Normalize floats to floatx.
def is_non_floatx_float(dtype):
return (
dtype is not object
and backend.is_float_dtype(dtype)
and not backend.standardize_dtype(dtype) == backend.floatx()
)
cast_dtype = None
if pandas is not None and isinstance(x, pandas.DataFrame):
if any(is_non_floatx_float(d) for d in x.dtypes.values):
cast_dtype = backend.floatx()
else:
if is_non_floatx_float(x.dtype):
cast_dtype = backend.floatx()
if cast_dtype is not None:
x = sliceable_class.cast(x, cast_dtype)
# Step 3. Apply target backend specific logic and optimizations.
if target_backend is None:
return sliceable_class(x)
if target_backend == "tensorflow":
return sliceable_class.convert_to_tf_dataset_compatible(x)
# With dense arrays and JAX as output, it is faster to use NumPy as an
# intermediary representation, so wrap input array in a NumPy array,
# which should not use extra memory.
# See https://github.com/google/jax/issues/1276 for an explanation of
# why slicing a NumPy array is faster than slicing a JAX array.
if target_backend == "jax" and sliceable_class in (
TensorflowSliceable,
TorchSliceable,
):
x = np.asarray(x)
sliceable_class = NumpySliceable
return sliceable_class(x)
return tree.map_structure(convert_single_array, arrays)
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested
structures of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset
to include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
flat_arrays = tree.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not can_slice_array(t)]
if unsplitable:
raise ValueError(
"Argument `validation_split` is only supported "
"for tensors or NumPy arrays."
f"Found incompatible type in the input: {unsplitable}"
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
f"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
f"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument."
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
sliceables = convert_to_sliceable(arrays)
train_arrays = tree.map_structure(
lambda x: _split(x, start=0, end=split_at), sliceables
)
val_arrays = tree.map_structure(
lambda x: _split(x, start=split_at, end=batch_dim), sliceables
)
return train_arrays, val_arrays
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/grain_dataset_adapter_test.py | keras/src/trainers/data_adapters/grain_dataset_adapter_test.py | import grain
import numpy as np
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import grain_dataset_adapter
class Range2DSource(grain.sources.RandomAccessDataSource):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __getitem__(self, idx):
return np.expand_dims(np.array([self.start + idx]), axis=0)
def __len__(self):
return self.stop - self.start
class GrainDatasetAdapterTest(testing.TestCase):
def _get_dataset(self, dataset_type, worker_count=0, num_threads=0):
x = np.random.normal(size=(34, 4)).astype("float32")
y = np.random.normal(size=(34, 2)).astype("float32")
class MySource(grain.sources.RandomAccessDataSource):
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return len(self.x)
if dataset_type == "map_dataset":
dataset = grain.MapDataset.source(MySource(x, y)).batch(
batch_size=16
)
elif dataset_type == "iter_dataset":
dataset = (
grain.MapDataset.source(MySource(x, y))
.to_iter_dataset()
.batch(batch_size=16)
)
else:
source = MySource(x, y)
dataset = grain.DataLoader(
data_source=source,
operations=[grain.transforms.Batch(batch_size=16)],
shard_options=grain.sharding.NoSharding(),
sampler=grain.samplers.IndexSampler(
num_records=len(source), num_epochs=1
),
worker_count=worker_count,
read_options=grain.ReadOptions(num_threads=num_threads),
)
return dataset
@parameterized.named_parameters(
named_product(
dataset_type=["map_dataset", "iter_dataset", "data_loader"]
)
)
def test_basic_flow(self, dataset_type):
dataset = self._get_dataset(dataset_type)
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
self.assertEqual(adapter.num_batches, None)
self.assertEqual(adapter.batch_size, 16)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
if backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
else:
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
for i, batch in enumerate(it):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
@parameterized.named_parameters(
named_product(data_type=["list", "dict", "nested_list", "nested_dict"])
)
def test_nested_data(self, data_type):
if data_type not in ("list", "dict", "nested_list", "nested_dict"):
raise ValueError(
"data_type must be one of 'list', 'dict', 'nested_list' or "
f"'nested_dict'. Received: {data_type}"
)
class NestedSource(grain.sources.RandomAccessDataSource):
def __init__(self, data_type):
self.x = np.random.random((40, 4)).astype("float32")
self.y = np.random.random((40, 2)).astype("float32")
self.data_type = data_type
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
x = self.x[idx]
y = self.y[idx]
if self.data_type == "list":
return x, y
elif self.data_type == "dict":
return {"x": x, "y": y}
elif self.data_type == "nested_list":
return x, (x, y)
elif self.data_type == "nested_dict":
return {"data": {"x": x, "y": y}}
dataset = grain.MapDataset.source(NestedSource(data_type)).batch(
batch_size=4
)
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
if backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
else:
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
for batch in it:
if data_type == "list":
self.assertEqual(len(batch), 2)
bx, by = batch
elif data_type == "dict":
self.assertEqual(len(batch), 2)
bx, by = batch["x"], batch["y"]
elif data_type == "nested_list":
self.assertEqual(len(batch), 2)
bx, (_, by) = batch
elif data_type == "nested_dict":
self.assertEqual(len(batch["data"]), 2)
bx, by = batch["data"]["x"], batch["data"]["y"]
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.shape, (4, 4))
self.assertEqual(by.shape, (4, 2))
def test_multiple_calling_on_iterators(self):
dataset = self._get_dataset("iter_dataset")
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
numpy_it = adapter.get_numpy_iterator()
jax_it = adapter.get_jax_iterator()
tf_it = adapter.get_tf_dataset()
torch_it = adapter.get_torch_dataloader()
for it in (numpy_it, jax_it, tf_it, torch_it):
for batch in it:
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertEqual(bx.dtype, by.dtype)
def test_builtin_prefetch(self):
dataset = grain.MapDataset.source(Range2DSource(0, 42))
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
self.assertTrue(adapter.builtin_prefetch)
def test_num_batches(self):
dataset = grain.MapDataset.source(Range2DSource(0, 42))
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
self.assertEqual(adapter.num_batches, None)
# Test for Infinite Cardinality
dataset = grain.MapDataset.source(Range2DSource(0, 42))
dataset = dataset.repeat()
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
# Test for Unknown Cardinality
dataset = dataset.filter(lambda x: True)
adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
def test_invalid_dataset_type(self):
with self.assertRaisesRegex(
ValueError,
(
r"Expected `dataset` to be a grain.MapDataset, "
r"grain.IterDataset or grain.DataLoader. "
),
):
grain_dataset_adapter.GrainDatasetAdapter(
"This is not a grain.Dataset"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/tf_dataset_adapter.py | keras/src/trainers/data_adapters/tf_dataset_adapter.py | from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Initialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(
convert_to_numpy, batch, none_is_leaf=False
)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch, none_is_leaf=False)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def builtin_prefetch(self):
return True
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1, output_type=tf.int32),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/py_dataset_adapter.py | keras/src/trainers/data_adapters/py_dataset_adapter.py | import itertools
import multiprocessing.dummy
import queue
import random
import threading
import warnings
import weakref
from contextlib import closing
import numpy as np
from keras.src.api_export import keras_export
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
@keras_export(["keras.utils.PyDataset", "keras.utils.Sequence"])
class PyDataset:
"""Base class for defining a parallel dataset using Python code.
Every `PyDataset` must implement the `__getitem__()` and the `__len__()`
methods. If you want to modify your dataset between epochs,
you may additionally implement `on_epoch_end()`,
or `on_epoch_begin` to be called at the start of each epoch.
The `__getitem__()` method should return a complete batch
(not a single sample), and the `__len__` method should return
the number of batches in the dataset (rather than the number of samples).
Args:
workers: Number of workers to use in multithreading or
multiprocessing.
use_multiprocessing: Whether to use Python multiprocessing for
parallelism. Setting this to `True` means that your
dataset will be replicated in multiple forked processes.
This is necessary to gain compute-level (rather than I/O level)
benefits from parallelism. However it can only be set to
`True` if your dataset can be safely pickled.
max_queue_size: Maximum number of batches to keep in the queue
when iterating over the dataset in a multithreaded or
multiprocessed setting.
Reduce this value to reduce the CPU memory consumption of
your dataset. Defaults to 10.
Notes:
- `PyDataset` is a safer way to do multiprocessing.
This structure guarantees that the model will only train
once on each sample per epoch, which is not the case
with Python generators.
- The arguments `workers`, `use_multiprocessing`, and `max_queue_size`
exist to configure how `fit()` uses parallelism to iterate
over the dataset. They are not being used by the `PyDataset` class
directly. When you are manually iterating over a `PyDataset`,
no parallelism is applied.
Example:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10PyDataset(keras.utils.PyDataset):
def __init__(self, x_set, y_set, batch_size, **kwargs):
super().__init__(**kwargs)
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
# Return number of batches.
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
# Return x, y for batch idx.
low = idx * self.batch_size
# Cap upper bound at array length; the last batch may be smaller
# if the total number of items is not a multiple of batch size.
high = min(low + self.batch_size, len(self.x))
batch_x = self.x[low:high]
batch_y = self.y[low:high]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
def __init__(self, workers=1, use_multiprocessing=False, max_queue_size=10):
self._workers = workers
self._use_multiprocessing = use_multiprocessing
self._max_queue_size = max_queue_size
def _warn_if_super_not_called(self):
warn = False
if not hasattr(self, "_workers"):
self._workers = 1
warn = True
if not hasattr(self, "_use_multiprocessing"):
self._use_multiprocessing = False
warn = True
if not hasattr(self, "_max_queue_size"):
self._max_queue_size = 10
warn = True
if warn:
warnings.warn(
"Your `PyDataset` class should call "
"`super().__init__(**kwargs)` in its constructor. "
"`**kwargs` can include `workers`, "
"`use_multiprocessing`, `max_queue_size`. Do not pass "
"these arguments to `fit()`, as they will be ignored.",
stacklevel=2,
)
@property
def workers(self):
self._warn_if_super_not_called()
return self._workers
@workers.setter
def workers(self, value):
self._workers = value
@property
def use_multiprocessing(self):
self._warn_if_super_not_called()
return self._use_multiprocessing
@use_multiprocessing.setter
def use_multiprocessing(self, value):
self._use_multiprocessing = value
@property
def max_queue_size(self):
self._warn_if_super_not_called()
return self._max_queue_size
@max_queue_size.setter
def max_queue_size(self, value):
self._max_queue_size = value
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the PyDataset.
Returns:
A batch
"""
del index
raise NotImplementedError
def __iter__(self):
index_range = None
try:
num_batches = self.num_batches
if num_batches is not None:
index_range = range(num_batches)
except NotImplementedError:
pass
if index_range is None:
index_range = itertools.count()
for index in index_range:
yield self[index]
@property
def num_batches(self):
"""Number of batches in the PyDataset.
Returns:
The number of batches in the PyDataset or `None` to indicate that
the dataset is infinite.
"""
# For backwards compatibility, support `__len__`.
if hasattr(self, "__len__"):
return len(self)
raise NotImplementedError(
"You need to implement the `num_batches` property:\n\n"
"@property\ndef num_batches(self):\n return ..."
)
def on_epoch_begin(self):
"""Method called at the beginning of every epoch."""
pass
def on_epoch_end(self):
"""Method called at the end of every epoch."""
pass
class PyDatasetAdapter(DataAdapter):
"""Adapter for `keras.utils.PyDataset` instances."""
def __init__(
self,
x,
class_weight=None,
shuffle=False,
):
self.py_dataset = x
self.class_weight = class_weight
self.enqueuer = None
self.shuffle = shuffle
self._output_signature = None
self._within_epoch = False
workers = self.py_dataset.workers
use_multiprocessing = self.py_dataset.use_multiprocessing
if workers > 1 or (workers > 0 and use_multiprocessing):
self.enqueuer = OrderedEnqueuer(
self.py_dataset,
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=self.py_dataset.max_queue_size,
shuffle=self.shuffle,
)
def _standardize_batch(self, batch):
if isinstance(batch, dict):
return batch
if isinstance(batch, np.ndarray):
batch = (batch,)
if isinstance(batch, list):
batch = tuple(batch)
if not isinstance(batch, tuple) or len(batch) not in {1, 2, 3}:
raise ValueError(
"PyDataset.__getitem__() must return a tuple or a dict. "
"If a tuple, it must be ordered either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {str(batch)[:100]}... of type {type(batch)}"
)
if self.class_weight is not None:
if len(batch) == 3:
raise ValueError(
"You cannot specify `class_weight` "
"and `sample_weight` at the same time."
)
if len(batch) == 2:
sw = data_adapter_utils.class_weight_to_sample_weights(
batch[1], self.class_weight
)
batch = batch + (sw,)
return batch
def _infinite_generator(self):
for i in itertools.count():
yield self._standardize_batch(self.py_dataset[i])
def _finite_generator(self):
indices = range(self.py_dataset.num_batches)
if self.shuffle:
indices = list(indices)
random.shuffle(indices)
for i in indices:
yield self._standardize_batch(self.py_dataset[i])
def _infinite_enqueuer_generator(self):
self.enqueuer.start()
for batch in self.enqueuer.get():
yield self._standardize_batch(batch)
def _finite_enqueuer_generator(self):
self.enqueuer.start()
num_batches = self.py_dataset.num_batches
for i, batch in enumerate(self.enqueuer.get()):
yield self._standardize_batch(batch)
if i >= num_batches - 1:
self.enqueuer.stop()
return
def _get_iterator(self):
if self.enqueuer is None:
if self.py_dataset.num_batches is None:
return self._infinite_generator()
else:
return self._finite_generator()
else:
if self.py_dataset.num_batches is None:
return self._infinite_enqueuer_generator()
else:
return self._finite_enqueuer_generator()
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self._get_iterator())
def get_jax_iterator(self):
return data_adapter_utils.get_jax_iterator(self._get_iterator())
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
num_batches = self.py_dataset.num_batches
if self._output_signature is None:
num_samples = data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
if num_batches is not None:
num_samples = min(num_samples, num_batches)
batches = [
self._standardize_batch(self.py_dataset[i])
for i in range(num_samples)
]
if len(batches) == 0:
raise ValueError("The PyDataset has length 0")
self._output_signature = data_adapter_utils.get_tensor_spec(batches)
ds = tf.data.Dataset.from_generator(
self._get_iterator,
output_signature=self._output_signature,
)
if self.enqueuer is not None:
# The enqueuer does its own multithreading / multiprocesssing to
# prefetch items. Disable the tf.data.Dataset prefetching and
# threading as it interferes.
options = tf.data.Options()
options.autotune.enabled = False
options.threading.private_threadpool_size = 1
ds = ds.with_options(options)
else:
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._get_iterator())
def on_epoch_begin(self):
if self._within_epoch:
raise ValueError(
"`on_epoch_begin` was called twice without `on_epoch_end` "
"having been called."
)
self._within_epoch = True
if self.enqueuer:
self.enqueuer.start()
self.py_dataset.on_epoch_begin()
def on_epoch_end(self):
if self.enqueuer:
self.enqueuer.stop()
self.py_dataset.on_epoch_end()
self._within_epoch = False
@property
def num_batches(self):
return self.py_dataset.num_batches
@property
def batch_size(self):
return None
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_FORCE_THREADPOOL = False
def get_pool_class(use_multiprocessing):
    """Return the pool class used for parallel data fetching.

    Args:
        use_multiprocessing: whether a process pool was requested.

    Returns:
        `multiprocessing.Pool` when multiprocessing was requested and not
        overridden, otherwise `multiprocessing.dummy.Pool` (a thread pool).
    """
    # `_FORCE_THREADPOOL` is only read here, so no `global` declaration is
    # needed (the original one was a no-op). When set, it forces a thread
    # pool regardless of `use_multiprocessing`.
    if not use_multiprocessing or _FORCE_THREADPOOL:
        return multiprocessing.dummy.Pool  # ThreadPool
    return multiprocessing.Pool
def get_worker_id_queue():
    """Return the shared worker-id queue, creating it on first use."""
    global _WORKER_ID_QUEUE
    id_queue = _WORKER_ID_QUEUE
    if id_queue is None:
        id_queue = multiprocessing.Queue()
        _WORKER_ID_QUEUE = id_queue
    return id_queue
def get_index(uid, i):
    """Look up item `i` of the PyDataset registered under `uid`.

    Several PyDatasets may be in use at the same time (e.g. training and
    validation), so each is registered under a unique `uid` to keep one
    from overwriting another.

    This method is called from worker threads.

    Args:
        uid: int, PyDataset identifier
        i: index

    Returns:
        The value at index `i`.
    """
    return _SHARED_SEQUENCES[uid][i]
class PyDatasetEnqueuer:
    """Base class to enqueue inputs.

    The task of an Enqueuer is to use parallelism to speed up preprocessing.
    This is done with processes or threads.

    Example:

    ```python
    enqueuer = PyDatasetEnqueuer(...)
    enqueuer.start()
    datas = enqueuer.get()
    for data in datas:
        # Use the inputs; training, evaluating, predicting.
        # ... stop sometime.
    enqueuer.stop()
    ```

    The `enqueuer.get()` should be an infinite stream of data.
    """

    def __init__(
        self,
        py_dataset,
        workers=1,
        use_multiprocessing=False,
        max_queue_size=10,
    ):
        self.py_dataset = py_dataset
        global _SEQUENCE_COUNTER
        if _SEQUENCE_COUNTER is None:
            try:
                _SEQUENCE_COUNTER = multiprocessing.Value("i", 0)
            except OSError:
                # In this case the OS does not allow us to use
                # multiprocessing. We resort to an int
                # for enqueuer indexing.
                _SEQUENCE_COUNTER = 0
        if isinstance(_SEQUENCE_COUNTER, int):
            self.uid = _SEQUENCE_COUNTER
            _SEQUENCE_COUNTER += 1
        else:
            # Doing Multiprocessing.Value += x is not process-safe.
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1
        # Fully processed items not yet consumed; refilled when `stop()`
        # drains `future_queue` so the next run can reuse them.
        self.ready_queue = queue.Queue()
        # Bounded queue of async results (or exceptions) produced by the
        # worker pool; bounded so workers don't run arbitrarily far ahead.
        self.future_queue = queue.Queue(max_queue_size)
        self.running = False
        # Serializes `start()` / `stop()` which may be called from
        # different threads.
        self.start_stop_lock = threading.Lock()
        self.run_thread = None
        if use_multiprocessing:
            self.executor_fn = self._get_executor_init(workers)
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: get_pool_class(False)(workers)

    def is_running(self):
        """Whether the enqueuer is running.

        This method is thread safe and called from many threads.

        Returns: boolean indicating whether this enqueuer is running.
        """
        return self.running

    def start(self):
        """Starts the handler's workers.

        This method is thread safe but is called from the main thread.
        It is safe to call this method multiple times, extra calls are
        ignored.
        """
        with self.start_stop_lock:
            if self.running:
                return
            self.running = True
            # `_run` is implemented by subclasses; it feeds `future_queue`.
            self.run_thread = threading.Thread(target=self._run)
            self.run_thread.name = f"Worker_{self.uid}"
            self.run_thread.daemon = True
            self.run_thread.start()

    def stop(self, drain_queue_and_join=True):
        """Stops running threads and wait for them to exit, if necessary.

        This method is thread safe and is called from various threads. Note
        that the `drain_queue_and_join` argument must be set correctly.
        It is safe to call this method multiple times, extra calls are
        ignored.

        Args:
            drain_queue_and_join: set to True to drain the queue of pending
                items and wait for the worker thread to complete. Set to
                False if invoked from a worker thread to avoid deadlocks.
                Note that setting this to False means this enqueuer won't
                be reused.
        """
        with self.start_stop_lock:
            if not self.running:
                return
            # Setting this first makes the run thread's loop exit.
            self.running = False
            if drain_queue_and_join:
                # Drain the `future_queue` and put items in `ready_queue` for
                # the next run.
                while True:
                    try:
                        value = self.future_queue.get(block=True, timeout=0.1)
                        if isinstance(value, Exception):
                            raise value  # Propagate exception from other thread
                        inputs = value.get()
                        self.future_queue.task_done()
                        if inputs is not None:
                            self.ready_queue.put(inputs)
                    except queue.Empty:
                        break
                # Only safe to join when not called from the run thread
                # itself (that would deadlock).
                self.run_thread.join()
            self.run_thread = None
            _SHARED_SEQUENCES[self.uid] = None

    def _send_py_dataset(self):
        """Sends current Iterable to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.py_dataset

    def __del__(self):
        # Cannot drain/join from a finalizer: the run thread may be the one
        # holding the last references.
        self.stop(drain_queue_and_join=False)

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        raise NotImplementedError

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a Function to initialize the pool
        """
        raise NotImplementedError

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.
        This method is called from the main thread.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        raise NotImplementedError
class OrderedEnqueuer(PyDatasetEnqueuer):
    """Builds a Enqueuer from a PyDataset.

    Batches are yielded in index order (possibly shuffled per epoch).

    Args:
        py_dataset: A `keras.utils.PyDataset` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(
        self,
        py_dataset,
        workers=1,
        use_multiprocessing=False,
        max_queue_size=10,
        shuffle=False,
    ):
        super().__init__(
            py_dataset, workers, use_multiprocessing, max_queue_size
        )
        self.shuffle = shuffle
        if self.py_dataset.num_batches is None:
            # For infinite datasets, `self.indices` is created here once for
            # all so that subsequent runs resume from where they stopped.
            self.indices = itertools.count()

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a Function to initialize the pool
        """

        def pool_fn(seqs):
            pool = get_pool_class(True)(
                workers,
                initializer=init_pool_generator,
                initargs=(seqs, None, get_worker_id_queue()),
            )
            # Track the pool so its status can be checked later.
            _DATA_POOLS.add(pool)
            return pool

        return pool_fn

    def _run(self):
        """Submits request to the executor and queue the `Future` objects.

        This method is the run method of worker threads.
        """
        try:
            if self.py_dataset.num_batches is not None:
                # For finite datasets, `self.indices` is created here so
                # that shuffling creates a different order each time.
                indices = range(self.py_dataset.num_batches)
                if self.shuffle:
                    indices = list(indices)
                    random.shuffle(indices)
                self.indices = iter(indices)
            self._send_py_dataset()  # Share the initial py_dataset
            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                while self.is_running():
                    try:
                        i = next(self.indices)
                        # Blocks when `future_queue` is full, throttling
                        # the workers.
                        self.future_queue.put(
                            executor.apply_async(get_index, (self.uid, i)),
                            block=True,
                        )
                    except StopIteration:
                        break
        except Exception as e:
            self.future_queue.put(e)  # Report exception

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.
        This method is called from the main thread.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        while self.is_running():
            try:
                # Items left over from a previous run take priority.
                inputs = self.ready_queue.get(block=False)
                yield inputs
                continue  # Retry the ready_queue
            except queue.Empty:
                pass
            try:
                value = self.future_queue.get(block=True, timeout=5)
                self.future_queue.task_done()
                if isinstance(value, Exception):
                    raise value  # Propagate exception from other thread
                inputs = value.get()
                if inputs is not None:
                    yield inputs
            except queue.Empty:
                pass
            except Exception as e:
                # Stop from the consumer side before re-raising so worker
                # threads don't keep producing.
                self.stop(drain_queue_and_join=True)
                raise e
        # Note that it is ok to poll the iterator after the initial `start`,
        # which may happen before the first `on_epoch_begin`. But it's not ok
        # to poll after `on_epoch_end`.
        raise ValueError(
            "Iterator called after `on_epoch_end` or before `on_epoch_begin`."
        )
def init_pool_generator(gens, random_seed=None, id_queue=None):
    """Initializer function for pool workers.

    Args:
        gens: State which should be made available to worker processes.
        random_seed: An optional value with which to seed child processes.
        id_queue: A multiprocessing Queue of worker ids. This is used to
            indicate that a worker process was created by Keras.
    """
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens
    proc = multiprocessing.current_process()
    # The name is purely cosmetic, but a descriptive one helps when
    # diagnosing orphaned processes.
    proc.name = f"Keras_worker_{proc.name}"
    if random_seed is not None:
        # Offset by the process id so each worker gets its own stream.
        np.random.seed(random_seed + proc.ident)
    if id_queue is not None:
        # If a worker dies during init, the pool will just create a
        # replacement.
        id_queue.put(proc.ident, block=True, timeout=0.1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/py_dataset_adapter_test.py | keras/src/trainers/data_adapters/py_dataset_adapter_test.py | import math
import time
import jax
import numpy as np
import pytest
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import py_dataset_adapter
from keras.src.utils.rng_utils import set_random_seed
class ExamplePyDataset(py_dataset_adapter.PyDataset):
    """Minimal PyDataset serving batch slices of in-memory arrays.

    Can optionally attach sample weights, sleep per batch to emulate slow
    preprocessing, or present itself as infinite.
    """

    def __init__(
        self,
        x_set,
        y_set,
        sample_weight=None,
        batch_size=32,
        delay=0,
        infinite=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.sample_weight = sample_weight
        self.delay = delay
        self.infinite = infinite

    @property
    def num_batches(self):
        if self.infinite:
            return None
        return math.ceil(len(self.x) / self.batch_size)

    def __getitem__(self, idx):
        # Simulate slow data preparation to exercise parallel fetching.
        time.sleep(self.delay)
        if self.infinite:
            idx = idx % math.ceil(len(self.x) / self.batch_size)
        start = idx * self.batch_size
        # Clamp the end index: the final batch may be short when the dataset
        # size is not an exact multiple of the batch size.
        stop = min(start + self.batch_size, len(self.x))
        batch_x = self.x[start:stop]
        batch_y = self.y[start:stop]
        if self.sample_weight is None:
            return batch_x, batch_y
        return batch_x, batch_y, self.sample_weight[start:stop]
class DictPyDataset(py_dataset_adapter.PyDataset):
    """PyDataset yielding dict batches with `"x"` and `"y"` keys."""

    def __init__(self, inputs, batch_size=32, **kwargs):
        super().__init__(**kwargs)
        self.inputs = inputs
        self.batch_size = batch_size

    @property
    def num_batches(self):
        return math.ceil(len(self.inputs["x"]) / self.batch_size)

    def __getitem__(self, idx):
        start = idx * self.batch_size
        # Clamp so the trailing partial batch is sliced correctly.
        stop = min(start + self.batch_size, len(self.inputs["x"]))
        return {
            "x": self.inputs["x"][start:stop],
            "y": self.inputs["y"][start:stop],
        }
class ExceptionPyDataset(py_dataset_adapter.PyDataset):
    """PyDataset whose third and fourth batches raise a ValueError."""

    @property
    def num_batches(self):
        return 4

    def __getitem__(self, index):
        if index >= 2:
            raise ValueError("Expected exception")
        return (
            np.random.random((8, 4)).astype("float32"),
            np.random.random((8, 2)).astype("float32"),
        )
@pytest.mark.skipif(
    testing.tensorflow_uses_gpu() or testing.uses_tpu(),
    reason="Flaky on TPU and GPU",
)
class PyDatasetAdapterTest(testing.TestCase):
    """Tests for `PyDatasetAdapter` across backends, worker modes and
    dataset types (finite/infinite, shuffled/ordered, dict batches,
    variable shapes, class weights, exception propagation)."""

    @parameterized.named_parameters(
        named_product(
            [
                {
                    "testcase_name": "multiprocessing",
                    "workers": 2,
                    "use_multiprocessing": True,
                    "max_queue_size": 10,
                },
                {
                    "testcase_name": "multithreading",
                    "workers": 2,
                    "use_multiprocessing": False,
                    "max_queue_size": 10,
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_np",
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_tf",
                    "dataset_type": "tf",
                },
                {
                    "testcase_name": "single_jax",
                    "dataset_type": "jax",
                },
                {
                    "testcase_name": "single_torch",
                    "dataset_type": "torch",
                },
            ],
            infinite=[True, False],
            shuffle=[True, False],
        )
    )
    def test_basic_flow(
        self,
        shuffle,
        dataset_type,
        infinite,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        """End-to-end check: correct batch types/shapes and sample order."""
        if use_multiprocessing and shuffle:
            pytest.skip("Starting processes is slow, test fewer variants")
        set_random_seed(1337)
        x = np.random.random((64, 4)).astype("float32")
        # y encodes the sample index so ordering can be verified below.
        y = np.array([[i, i] for i in range(64)], dtype="float32")
        CPU_DEVICES = {
            "tensorflow": "CPU:0",
            "jax": "cpu:0",
        }
        cpu_device = CPU_DEVICES.get(backend.backend(), "cpu")
        with backend.device(cpu_device):
            if dataset_type == "tf":
                x, y = tf.constant(x), tf.constant(y)
            elif dataset_type == "jax":
                x, y = jax.numpy.array(x), jax.numpy.array(y)
            elif dataset_type == "torch":
                x, y = torch.as_tensor(x), torch.as_tensor(y)
        py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=16,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
            infinite=infinite,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=shuffle
        )
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = jax.Array if dataset_type == "jax" else np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        sample_order = []
        adapter.on_epoch_begin()
        for batch in it:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            self.assertEqual(bx.shape, (16, 4))
            self.assertEqual(by.shape, (16, 2))
            for i in range(by.shape[0]):
                sample_order.append(by[i, 0])
            if infinite:
                # Infinite datasets never stop iterating; simulate two
                # epochs then exit manually.
                if len(sample_order) == 64:
                    adapter.on_epoch_end()
                    adapter.on_epoch_begin()
                elif len(sample_order) >= 128:
                    break
        adapter.on_epoch_end()
        expected_order = list(range(64))
        if infinite:
            # The starting epoch is not shuffled.
            self.assertAllClose(sample_order, expected_order + expected_order)
        elif shuffle:
            self.assertNotAllClose(sample_order, expected_order)
            self.assertAllClose(sorted(sample_order), expected_order)
        else:
            self.assertAllClose(sample_order, expected_order)
        # TODO: test sample weights
        # TODO: test inference mode (single output)

    def test_class_weight(self):
        x = np.random.randint(1, 100, (4, 5))
        y = np.array([0, 1, 2, 1])
        class_w = {0: 2, 1: 1, 2: 3}
        py_dataset = ExamplePyDataset(x, y, batch_size=2)
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=False, class_weight=class_w
        )
        if backend.backend() == "tensorflow":
            gen = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            gen = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            gen = adapter.get_torch_dataloader()
        else:
            gen = adapter.get_numpy_iterator()
        for index, batch in enumerate(gen):
            # Batch is a tuple of (x, y, class_weight)
            self.assertLen(batch, 3)
            batch = [backend.convert_to_numpy(x) for x in batch]
            # Let's verify the data and class weights match for each element
            # of the batch (2 elements in each batch)
            for sub_elem in range(2):
                self.assertAllEqual(batch[0][sub_elem], x[index * 2 + sub_elem])
                self.assertEqual(batch[1][sub_elem], y[index * 2 + sub_elem])
                class_key = np.int32(batch[1][sub_elem])
                self.assertEqual(batch[2][sub_elem], class_w[class_key])
        self.assertEqual(index, 1)  # 2 batches

    def test_speedup(self):
        """Parallel fetching with an artificial delay must beat serial."""
        x = np.random.random((40, 4))
        y = np.random.random((40, 2))
        no_speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            delay=0.2,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            no_speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        no_speedup_time = time.time() - t0
        speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            workers=4,
            # TODO: the github actions runner may have performance issue with
            # multiprocessing
            # use_multiprocessing=True,
            max_queue_size=8,
            delay=0.2,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        speedup_time = time.time() - t0
        self.assertLess(speedup_time, no_speedup_time)

    def test_dict_inputs(self):
        inputs = {
            "x": np.random.random((40, 4)),
            "y": np.random.random((40, 2)),
        }
        py_dataset = DictPyDataset(inputs, batch_size=4)
        adapter = py_dataset_adapter.PyDatasetAdapter(py_dataset, shuffle=False)
        gen = adapter.get_numpy_iterator()
        for batch in gen:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, np.ndarray)
            self.assertIsInstance(by, np.ndarray)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(bx.shape, (4, 4))
            self.assertEqual(by.shape, (4, 2))
        ds = adapter.get_tf_dataset()
        for batch in ds:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, tf.Tensor)
            self.assertIsInstance(by, tf.Tensor)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(tuple(bx.shape), (4, 4))
            self.assertEqual(tuple(by.shape), (4, 2))

    def test_with_different_shapes(self):
        """Batches may legitimately differ in shape between indices."""

        class TestPyDataset(py_dataset_adapter.PyDataset):
            @property
            def num_batches(self):
                return 3

            def __getitem__(self, idx):
                if idx == 0:
                    return np.ones([16, 4], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                if idx == 1:
                    return np.ones([16, 5], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                else:
                    return np.ones([2, 6], "float32"), np.ones(
                        [2, 2], "float32"
                    )

        adapter = py_dataset_adapter.PyDatasetAdapter(
            TestPyDataset(), shuffle=False
        )
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        else:
            it = adapter.get_numpy_iterator()
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i == 0:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            elif i == 1:
                self.assertEqual(bx.shape, (16, 5))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 6))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(
        [
            {
                "testcase_name": "multiprocessing",
                "workers": 2,
                "use_multiprocessing": True,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "multithreading",
                "workers": 2,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "single",
            },
        ]
    )
    def test_exception_reported(
        self,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        """An exception in `__getitem__` must surface in the main thread."""
        if backend.backend() == "jax" and use_multiprocessing is True:
            self.skipTest(
                "The CI failed for an unknown reason with "
                "`use_multiprocessing=True` in the jax backend"
            )
        dataset = ExceptionPyDataset(
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(dataset, shuffle=False)
        expected_exception_class = ValueError
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            # tf.data wraps the exception
            expected_exception_class = tf.errors.InvalidArgumentError
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        else:
            it = adapter.get_numpy_iterator()
        it = iter(it)
        # The first two batches are valid; the third raises.
        next(it)
        next(it)
        with self.assertRaisesRegex(
            expected_exception_class, "Expected exception"
        ):
            next(it)

    def test_iterate_finite(self):
        py_dataset = ExamplePyDataset(
            np.ones((6, 11), dtype="int32"),
            np.zeros((6, 11), dtype="int32"),
            batch_size=2,
        )
        batches = [batch for batch in py_dataset]
        self.assertLen(batches, 3)

    def test_iterate_infinite_with_none_num_batches(self):
        py_dataset = ExamplePyDataset(
            np.ones((6, 11), dtype="int32"),
            np.zeros((6, 11), dtype="int32"),
            batch_size=2,
            infinite=True,
        )
        for index, _ in enumerate(py_dataset):
            if index >= 10:
                break

    def test_iterate_infinite_with_no_len(self):
        class NoLenDataset(py_dataset_adapter.PyDataset):
            def __getitem__(self, idx):
                yield np.ones((2, 11), dtype="int32")

        for index, _ in enumerate(NoLenDataset()):
            if index >= 10:
                break
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/array_data_adapter_test.py | keras/src/trainers/data_adapters/array_data_adapter_test.py | import jax
import jax.experimental.sparse as jax_sparse
import numpy as np
import pandas
import scipy
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import array_data_adapter
class TestArrayDataAdapter(testing.TestCase):
    """Tests for `ArrayDataAdapter` across array container types
    (NumPy, TF dense/ragged/sparse, JAX dense/sparse, torch, pandas,
    scipy sparse), shuffling modes, class weights and validation errors."""

    def make_array(self, array_type, shape, dtype):
        """Build a test array of `array_type` where row i is filled with i."""
        x = np.array([[i] * shape[1] for i in range(shape[0])], dtype=dtype)
        if array_type == "np":
            return x
        elif array_type == "tf":
            return tf.constant(x)
        elif array_type == "tf_ragged":
            return tf.RaggedTensor.from_tensor(x)
        elif array_type == "tf_sparse":
            return tf.sparse.from_dense(x)
        elif array_type == "jax":
            return jax.numpy.array(x)
        elif array_type == "jax_sparse":
            return jax_sparse.BCOO.fromdense(x)
        elif array_type == "torch":
            return torch.as_tensor(x)
        elif array_type == "pandas_data_frame":
            return pandas.DataFrame(x)
        elif array_type == "pandas_series":
            return pandas.Series(x[:, 0])
        elif array_type == "scipy_sparse":
            return scipy.sparse.coo_matrix(x)

    @parameterized.named_parameters(
        named_product(
            array_type=[
                "np",
                "tf",
                "tf_ragged",
                "tf_sparse",
                "jax",
                "jax_sparse",
                "torch",
                "pandas_data_frame",
                "pandas_series",
                "scipy_sparse",
            ],
            array_dtype=["float32", "float64"],
            shuffle=[False, "batch", True],
        )
    )
    def test_basic_flow(self, array_type, array_dtype, shuffle):
        x = self.make_array(array_type, (34, 4), array_dtype)
        y = self.make_array(array_type, (34, 2), "int32")
        # pandas Series are 1-D, so the feature dimension collapses to 1.
        xdim1 = 1 if array_type == "pandas_series" else 4
        ydim1 = 1 if array_type == "pandas_series" else 2
        adapter = array_data_adapter.ArrayDataAdapter(
            x,
            y=y,
            sample_weight=None,
            batch_size=16,
            steps=None,
            shuffle=shuffle,
        )
        # 34 samples at batch size 16 -> 2 full batches + 1 partial of 2.
        self.assertEqual(adapter.num_batches, 3)
        self.assertEqual(adapter.batch_size, 16)
        self.assertEqual(adapter.has_partial_batch, True)
        self.assertEqual(adapter.partial_batch_size, 2)
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            if array_type == "tf_ragged":
                expected_class = tf.RaggedTensor
                xdim1 = None
                ydim1 = None
            elif array_type in ("tf_sparse", "jax_sparse", "scipy_sparse"):
                expected_class = tf.SparseTensor
            else:
                expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            if array_type in ("tf_sparse", "jax_sparse", "scipy_sparse"):
                expected_class = jax_sparse.JAXSparse
            else:
                expected_class = np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        x_order = []
        y_order = []
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(
                backend.standardize_dtype(bx.dtype), backend.floatx()
            )
            self.assertEqual(backend.standardize_dtype(by.dtype), "int32")
            if i < 2:
                self.assertEqual(bx.shape, (16, xdim1))
                self.assertEqual(by.shape, (16, ydim1))
            else:
                self.assertEqual(bx.shape, (2, xdim1))
                self.assertEqual(by.shape, (2, ydim1))
            # Densify sparse batches so values can be read by index.
            if isinstance(bx, tf.SparseTensor):
                bx = tf.sparse.to_dense(bx)
                by = tf.sparse.to_dense(by)
            if isinstance(bx, jax_sparse.JAXSparse):
                bx = bx.todense()
                by = by.todense()
            # Row values encode the original sample index (see make_array).
            x_batch_order = [float(bx[j, 0]) for j in range(bx.shape[0])]
            y_batch_order = [float(by[j, 0]) for j in range(by.shape[0])]
            x_order.extend(x_batch_order)
            y_order.extend(y_batch_order)
            if shuffle == "batch":
                # "batch" shuffling only permutes within each batch.
                self.assertAllClose(
                    sorted(x_batch_order),
                    list(range(i * 16, i * 16 + bx.shape[0])),
                )
        # x and y must stay aligned regardless of shuffling.
        self.assertAllClose(x_order, y_order)
        if shuffle:
            self.assertNotAllClose(x_order, list(range(34)))
        else:
            self.assertAllClose(x_order, list(range(34)))

    def test_multi_inputs_and_outputs(self):
        x1 = np.random.random((34, 1))
        x2 = np.random.random((34, 2))
        y1 = np.random.random((34, 3))
        y2 = np.random.random((34, 4))
        sw = np.random.random((34,))
        adapter = array_data_adapter.ArrayDataAdapter(
            x={"x1": x1, "x2": x2},
            y=[y1, y2],
            sample_weight=sw,
            batch_size=16,
            steps=None,
            shuffle=False,
        )
        gen = adapter.get_numpy_iterator()
        for i, batch in enumerate(gen):
            self.assertEqual(len(batch), 3)
            bx, by, bw = batch
            self.assertIsInstance(bx, dict)
            self.assertIsInstance(by, list)
            self.assertIsInstance(bw, list)
            self.assertIsInstance(bx["x1"], np.ndarray)
            self.assertIsInstance(bx["x2"], np.ndarray)
            self.assertIsInstance(by[0], np.ndarray)
            self.assertIsInstance(by[1], np.ndarray)
            self.assertIsInstance(bw[0], np.ndarray)
            self.assertIsInstance(bw[1], np.ndarray)
            self.assertEqual(bx["x1"].dtype, by[0].dtype)
            self.assertEqual(bx["x1"].dtype, backend.floatx())
            if i < 2:
                self.assertEqual(bx["x1"].shape, (16, 1))
                self.assertEqual(bx["x2"].shape, (16, 2))
                self.assertEqual(by[0].shape, (16, 3))
                self.assertEqual(by[1].shape, (16, 4))
                self.assertEqual(bw[0].shape, (16,))
                self.assertEqual(bw[1].shape, (16,))
            else:
                self.assertEqual(bx["x1"].shape, (2, 1))
                self.assertEqual(by[0].shape, (2, 3))
                self.assertEqual(bw[0].shape, (2,))
                self.assertEqual(bw[1].shape, (2,))
        ds = adapter.get_tf_dataset()
        for i, batch in enumerate(ds):
            self.assertEqual(len(batch), 3)
            bx, by, bw = batch
            self.assertIsInstance(bx, dict)
            # NOTE: the y list was converted to a tuple for tf.data
            # compatibility.
            self.assertIsInstance(by, tuple)
            self.assertIsInstance(bw, tuple)
            self.assertIsInstance(bx["x1"], tf.Tensor)
            self.assertIsInstance(bx["x2"], tf.Tensor)
            self.assertIsInstance(by[0], tf.Tensor)
            self.assertIsInstance(by[1], tf.Tensor)
            self.assertIsInstance(bw[0], tf.Tensor)
            self.assertIsInstance(bw[1], tf.Tensor)
            self.assertEqual(bx["x1"].dtype, by[0].dtype)
            self.assertEqual(bx["x1"].dtype, backend.floatx())
            if i < 2:
                self.assertEqual(tuple(bx["x1"].shape), (16, 1))
                self.assertEqual(tuple(bx["x2"].shape), (16, 2))
                self.assertEqual(tuple(by[0].shape), (16, 3))
                self.assertEqual(tuple(by[1].shape), (16, 4))
                self.assertEqual(tuple(bw[0].shape), (16,))
                self.assertEqual(tuple(bw[1].shape), (16,))
            else:
                self.assertEqual(tuple(bx["x1"].shape), (2, 1))
                self.assertEqual(tuple(by[0].shape), (2, 3))
                self.assertEqual(tuple(bw[0].shape), (2,))
                self.assertEqual(tuple(bw[1].shape), (2,))

    @parameterized.named_parameters(
        named_product(target_encoding=["int", "categorical"])
    )
    def test_class_weights(self, target_encoding):
        x = np.random.random((4, 2))
        if target_encoding == "int":
            y = np.array([[0], [1], [2], [3]], dtype="int32")
        else:
            y = np.array(
                [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
                dtype="float32",
            )
        class_weight = {
            0: 0.1,
            1: 0.2,
            2: 0.3,
            3: 0.4,
        }
        adapter = array_data_adapter.ArrayDataAdapter(
            x,
            y=y,
            class_weight=class_weight,
            batch_size=16,
        )
        gen = adapter.get_numpy_iterator()
        for batch in gen:
            self.assertEqual(len(batch), 3)
            _, _, bw = batch
            self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])

    def test_errors(self):
        x = np.random.random((34, 1))
        y = np.random.random((34, 3))
        sw = np.random.random((34,))
        cw = {
            0: 0.1,
            1: 0.2,
            2: 0.3,
            3: 0.4,
        }
        with self.assertRaisesRegex(
            ValueError, "Expected all elements of `x` to be array-like"
        ):
            array_data_adapter.ArrayDataAdapter(x="Invalid")
        with self.assertRaisesRegex(
            ValueError, "Expected all elements of `x` to be array-like"
        ):
            array_data_adapter.ArrayDataAdapter(x=x, y="Invalid")
        with self.assertRaisesRegex(
            ValueError, "Expected all elements of `x` to be array-like"
        ):
            array_data_adapter.ArrayDataAdapter(
                x=x, y=y, sample_weight="Invalid"
            )
        with self.assertRaisesRegex(
            ValueError, "You cannot `class_weight` and `sample_weight`"
        ):
            array_data_adapter.ArrayDataAdapter(
                x=x, y=y, sample_weight=sw, class_weight=cw
            )
        nested_y = ({"x": x, "y": y},)
        with self.assertRaisesRegex(
            ValueError, "You should provide one `sample_weight` array per"
        ):
            array_data_adapter.ArrayDataAdapter(
                x=x, y=nested_y, sample_weight=[]
            )
        tensor_sw = self.make_array("tf", (34, 2), "int32")
        with self.assertRaisesRegex(
            ValueError, "For a model with multiple outputs, when providing"
        ):
            array_data_adapter.ArrayDataAdapter(
                x=x, y=nested_y, sample_weight=tensor_sw
            )
        with self.assertRaisesRegex(
            ValueError,
            "`class_weight` is only supported for Models with a single",
        ):
            array_data_adapter.ArrayDataAdapter(
                x=x, y=nested_y, class_weight=cw
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py | keras/src/trainers/data_adapters/tf_dataset_adapter_test.py | from unittest import mock
import jax
import numpy as np
import pytest
import tensorflow as tf
import torch
from keras.src import Sequential
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.trainers.data_adapters import tf_dataset_adapter
class TestTFDatasetAdapter(testing.TestCase):
def test_basic_flow(self):
x = tf.random.normal((34, 4))
y = tf.random.normal((34, 2))
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, None)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
if backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
else:
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
for i, batch in enumerate(it):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
def _test_class_weights(self, target_encoding="int"):
x = np.random.random((4, 2))
if target_encoding == "int":
y = np.array([[0], [1], [2], [3]], dtype="int64")
else:
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
class_weight = {
0: 0.1,
1: 0.2,
2: 0.3,
3: 0.4,
}
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
self.assertEqual(len(batch), 3)
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_class_weights_int_targets(self):
self._test_class_weights(target_encoding="int")
def test_class_weights_categorical_targets(self):
self._test_class_weights(target_encoding="categorical")
def test_builtin_prefetch(self):
dataset = tf.data.Dataset.range(42)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertTrue(adapter.builtin_prefetch)
def test_num_batches(self):
dataset = tf.data.Dataset.range(42)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, 42)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertEqual(adapter.num_batches, 42)
# Test for Infinite Cardinality
dataset = tf.data.Dataset.range(42)
dataset = dataset.repeat()
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.INFINITE_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
# Test for Unknown Cardinality
dataset = dataset.filter(lambda x: True)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.UNKNOWN_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
def test_invalid_dataset_type(self):
    """A non-Dataset input is rejected with a descriptive ValueError."""
    bad_input = "This is not a tf.data.Dataset"
    with self.assertRaisesRegex(
        ValueError, "Expected argument `dataset` to be a tf.data.Dataset"
    ):
        tf_dataset_adapter.TFDatasetAdapter(bad_input)
def test_class_weight_and_sample_weight_together(self):
    """Providing both class weights and sample weights is an error."""
    features = np.random.random((4, 2))
    labels = np.array([[0], [1], [2], [3]], dtype="int64")
    weights = np.array([0.5, 0.5, 0.5, 0.5])
    # The dataset already yields (x, y, sample_weight) triplets.
    ds = tf.data.Dataset.from_tensor_slices(
        (features, labels, weights)
    ).batch(16)
    with self.assertRaisesRegex(
        ValueError,
        "You cannot `class_weight` and `sample_weight` at the same time.",
    ):
        tf_dataset_adapter.TFDatasetAdapter(
            ds, class_weight={0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
        )
def test_different_y_shapes_with_class_weight(self):
    """class_weight works for one-hot and sparse integer targets alike."""
    features = np.random.random((4, 2))
    weights = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}

    def check(labels):
        # Each batch must carry the per-sample weight for its class.
        ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(16)
        adapter = tf_dataset_adapter.TFDatasetAdapter(
            ds, class_weight=weights
        )
        for batch in adapter.get_numpy_iterator():
            self.assertAllClose(batch[2], [0.1, 0.2, 0.3, 0.4])

    # One-hot encoded targets (identity matrix encodes classes 0..3).
    check(np.eye(4, dtype="float32"))
    # Sparse integer targets.
    check(np.array([0, 1, 2, 3], dtype="int64"))
def test_nested_y_with_class_weight(self):
    """class_weight is rejected when targets are a nested structure."""
    features = np.random.random((4, 2))
    labels = np.array([0, 1, 2, 3], dtype="int64")
    # Two target outputs make `y` a tuple, which class_weight does not
    # support.
    ds = tf.data.Dataset.from_tensor_slices(
        (features, (labels, labels))
    ).batch(16)
    with self.assertRaisesRegex(
        ValueError,
        "`class_weight` is only supported for Models with a single output.",
    ):
        tf_dataset_adapter.TFDatasetAdapter(
            ds, class_weight={0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
        )
def test_class_weights_map_fn_with_sample_weight(self):
    """The generated map fn refuses pre-existing sample weights."""
    map_fn = tf_dataset_adapter.make_class_weight_map_fn(
        {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    )
    features = np.array([[0.5, 0.5], [0.5, 0.5]])
    labels = np.array([[1, 0], [0, 1]])
    weights = np.array([1.0, 1.0])
    with self.assertRaisesRegex(
        ValueError,
        "You cannot `class_weight` and `sample_weight` at the same time.",
    ):
        map_fn(features, labels, weights)
def test_class_weights_map_fn_nested_y(self):
    """The generated map fn rejects multi-output (nested) targets."""
    map_fn = tf_dataset_adapter.make_class_weight_map_fn(
        {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    )
    features = np.array([[0.5, 0.5]])
    with self.assertRaisesRegex(
        ValueError,
        "`class_weight` is only supported for Models with a single output.",
    ):
        map_fn(features, (np.array([1]), np.array([0])))
def test_distribute_dataset(self):
    """Distribution rewrites the dataset and hides static size info."""
    x = tf.random.normal((34, 4))
    y = tf.random.normal((34, 2))
    base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
    data_distribution = mock.Mock()
    # Mimic 2 workers: each worker receives batches of size 8 (its shard
    # of the rebatched dataset).
    data_distribution.distribute_dataset = mock.MagicMock(
        return_value=base_ds.rebatch(8).shard(2, index=0)
    )
    adapter = tf_dataset_adapter.TFDatasetAdapter(
        base_ds, distribution=data_distribution
    )
    # After distribution the per-worker cardinality/batch size is unknown,
    # so every size-related property reports None.
    self.assertEqual(adapter.num_batches, None)
    self.assertEqual(adapter.batch_size, None)
    self.assertEqual(adapter.has_partial_batch, None)
    self.assertEqual(adapter.partial_batch_size, None)
    # 34 samples rebatched to 8 -> two full (8, ...) batches for this
    # shard, then a partial batch of 2.
    gen = adapter.get_numpy_iterator()
    for i, batch in enumerate(gen):
        self.assertEqual(len(batch), 2)
        bx, by = batch
        self.assertIsInstance(bx, np.ndarray)
        self.assertIsInstance(by, np.ndarray)
        self.assertEqual(bx.dtype, by.dtype)
        self.assertEqual(bx.dtype, "float32")
        if i < 2:
            self.assertEqual(bx.shape, (8, 4))
            self.assertEqual(by.shape, (8, 2))
        else:
            self.assertEqual(bx.shape, (2, 4))
            self.assertEqual(by.shape, (2, 2))
    # Same expectations when iterating the tf.data view of the adapter.
    ds = adapter.get_tf_dataset()
    for i, batch in enumerate(ds):
        self.assertEqual(len(batch), 2)
        bx, by = batch
        self.assertIsInstance(bx, tf.Tensor)
        self.assertIsInstance(by, tf.Tensor)
        self.assertEqual(bx.dtype, by.dtype)
        self.assertEqual(bx.dtype, "float32")
        if i < 2:
            self.assertEqual(tuple(bx.shape), (8, 4))
            self.assertEqual(tuple(by.shape), (8, 2))
        else:
            self.assertEqual(tuple(bx.shape), (2, 4))
            self.assertEqual(tuple(by.shape), (2, 2))
@pytest.mark.skipif(
    not backend.SUPPORTS_SPARSE_TENSORS and backend.backend() != "numpy",
    reason="Backend does not support sparse tensors",
)
def test_tf_sparse_tensors(self):
    """Sparse batches are delivered in each backend's native sparse type
    (densified to ndarrays on the numpy backend)."""
    x = tf.SparseTensor(
        indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 4)
    )
    y = tf.SparseTensor(
        indices=[[0, 0], [1, 1]], values=[3.0, 4.0], dense_shape=(2, 2)
    )
    base_ds = tf.data.Dataset.from_tensors((x, y))
    adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds)
    # NOTE(review): there is no branch for the torch backend; the skipif
    # above is relied on to exclude it — confirm torch never reaches here.
    if backend.backend() == "numpy":
        it = adapter.get_numpy_iterator()
        expected_class = np.ndarray
    elif backend.backend() == "tensorflow":
        it = adapter.get_tf_dataset()
        expected_class = tf.SparseTensor
    elif backend.backend() == "jax":
        it = adapter.get_jax_iterator()
        expected_class = jax.experimental.sparse.BCOO
    for batch in it:
        self.assertEqual(len(batch), 2)
        bx, by = batch
        self.assertIsInstance(bx, expected_class)
        self.assertIsInstance(by, expected_class)
        self.assertEqual(bx.shape, (2, 4))
        self.assertEqual(by.shape, (2, 2))
def test_distributed_datasets_from_function_adapter_properties(self):
    """A `DistributedDatasetsFromFunction` is accepted by the adapter and
    reports its cardinality while leaving batch info unknown."""
    strategy = tf.distribute.MirroredStrategy(["CPU:0"])

    def dataset_fn(input_context):
        # Per-replica batch size for a global batch of 2.
        batch_size = input_context.get_per_replica_batch_size(
            global_batch_size=2
        )
        x = tf.random.uniform((32, 4))
        y = tf.random.uniform((32, 2))
        return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)

    dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
    adapter = tf_dataset_adapter.TFDatasetAdapter(dist_dataset)
    # 32 samples / global batch 2 -> 16 batches.
    self.assertEqual(adapter.num_batches, 16)
    self.assertIsNone(adapter.batch_size)
    self.assertIsNone(adapter.has_partial_batch)
    self.assertIsNone(adapter.partial_batch_size)
    # Pick the iterator flavor matching the active backend.
    if backend.backend() == "tensorflow":
        it = adapter.get_tf_dataset()
        expected_class = tf.Tensor
    elif backend.backend() == "jax":
        it = adapter.get_jax_iterator()
        expected_class = np.ndarray
    elif backend.backend() == "torch":
        it = adapter.get_torch_dataloader()
        expected_class = torch.Tensor
    else:
        it = adapter.get_numpy_iterator()
        expected_class = np.ndarray
    batch_count = 0
    for batch in it:
        batch_count += 1
        self.assertEqual(len(batch), 2)
        data, labels = batch
        self.assertIsInstance(data, expected_class)
        self.assertIsInstance(labels, expected_class)
        self.assertEqual(data.shape, (2, 4))
        self.assertEqual(labels.shape, (2, 2))
    self.assertEqual(batch_count, 16)
@pytest.mark.requires_trainable_backend
def test_distributed_datasets_from_function_model_integration(self):
    """`Model.fit` accepts a distributed dataset built from a function."""
    strategy = tf.distribute.MirroredStrategy(["CPU:0"])

    def make_dataset(ctx):
        per_replica = ctx.get_per_replica_batch_size(global_batch_size=2)
        features = tf.random.uniform((4, 1))
        targets = tf.random.uniform((4, 2))
        return tf.data.Dataset.from_tensor_slices(
            (features, targets)
        ).batch(per_replica)

    distributed = strategy.distribute_datasets_from_function(make_dataset)
    model = Sequential([layers.Dense(2, input_shape=(1,))])
    model.compile(optimizer="adam", loss="mse")
    history = model.fit(distributed, epochs=1)
    self.assertIn("loss", history.history)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/torch_data_loader_adapter.py | keras/src/trainers/data_adapters/torch_data_loader_adapter.py | import itertools
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TorchDataLoaderAdapter(DataAdapter):
    """Adapter that handles `torch.utils.data.DataLoader`."""

    def __init__(self, dataloader):
        """Wrap a `torch.utils.data.DataLoader`.

        Args:
            dataloader: The `DataLoader` to adapt.

        Raises:
            ValueError: If `dataloader` is not a
                `torch.utils.data.DataLoader`.
        """
        import torch

        if not isinstance(dataloader, torch.utils.data.DataLoader):
            # Fix: the two implicitly-concatenated fragments previously
            # produced "instance of`torch..." (missing space).
            raise ValueError(
                "Expected argument `dataloader` to be an instance of "
                f"`torch.utils.data.DataLoader`. Received: {dataloader}"
            )

        self._dataloader = dataloader
        # Lazily computed tf.TensorSpec structure for `get_tf_dataset`.
        self._output_signature = None
        self._batch_size = dataloader.batch_size
        self._num_batches = None
        self._partial_batch_size = None
        # Only map-style datasets expose `__len__`; iterable-style datasets
        # may not, in which case the sizes stay unknown (None).
        if hasattr(dataloader.dataset, "__len__"):
            self._num_batches = len(dataloader)
            if self._batch_size is not None:
                self._partial_batch_size = (
                    len(dataloader.dataset) % self._batch_size
                )

    def get_numpy_iterator(self):
        for batch in self._dataloader:
            # shared memory using `np.asarray`
            yield tuple(
                tree.map_structure(
                    lambda x: np.asarray(x.cpu()), batch, none_is_leaf=False
                )
            )

    def get_jax_iterator(self):
        # We use numpy as an intermediary because it is faster.
        return self.get_numpy_iterator()

    def get_tf_dataset(self):
        from keras.src.utils.module_utils import tensorflow as tf

        if self._output_signature is None:
            # Peek at a few batches to infer the tensor spec structure.
            batches = list(
                itertools.islice(
                    self._dataloader,
                    data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC,
                )
            )
            self._output_signature = tuple(
                data_adapter_utils.get_tensor_spec(batches)
            )
        return tf.data.Dataset.from_generator(
            self.get_numpy_iterator,
            output_signature=self._output_signature,
        )

    def get_torch_dataloader(self):
        return self._dataloader

    @property
    def builtin_prefetch(self):
        # Worker processes prefetch when `prefetch_factor` is set (> 0).
        prefetch_factor = self._dataloader.prefetch_factor
        return prefetch_factor is not None and prefetch_factor > 0

    @property
    def num_batches(self):
        return self._num_batches

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def has_partial_batch(self):
        # Returns None (unknown) rather than False when the partial batch
        # size is 0 or could not be determined, matching the base contract.
        if self._partial_batch_size:
            return self._partial_batch_size > 0
        else:
            return None

    @property
    def partial_batch_size(self):
        return self._partial_batch_size
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py | keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py | import math
import numpy as np
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters.torch_data_loader_adapter import (
TorchDataLoaderAdapter,
)
class TestTorchDataLoaderAdapter(testing.TestCase):
    """Tests for `TorchDataLoaderAdapter` across all Keras backends."""

    def test_basic_dataloader(self):
        """A batched map-style dataset reports exact sizes and yields the
        backend's native tensor type."""
        x = torch.normal(2, 3, size=(34, 4))
        y = torch.normal(1, 3, size=(34, 2))
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(ds, batch_size=16)
        adapter = TorchDataLoaderAdapter(dataloader)
        # 34 samples / batch 16 -> 2 full batches + 1 partial batch of 2.
        self.assertEqual(adapter.num_batches, 3)
        self.assertEqual(adapter.batch_size, 16)
        self.assertEqual(adapter.has_partial_batch, True)
        self.assertEqual(adapter.partial_batch_size, 2)
        # Pick the iterator flavor matching the active backend.
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i < 2:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 4))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(
        named_product(batch_size=[None, 3], implements_len=[True, False])
    )
    def test_dataloader_iterable_dataset(self, batch_size, implements_len):
        """Iterable-style datasets: size properties are only known when the
        dataset implements `__len__`."""

        class TestIterableDataset(torch.utils.data.IterableDataset):
            def __init__(self):
                self.x = torch.normal(2, 3, size=(16, 4))
                self.y = torch.normal(1, 3, size=(16, 2))

            def __iter__(self):
                # Ten identical (x, y) samples per epoch.
                for _ in range(10):
                    yield (self.x, self.y)

        class TestIterableDatasetWithLen(TestIterableDataset):
            def __len__(self):
                return 10

        ds = (
            TestIterableDatasetWithLen()
            if implements_len
            else TestIterableDataset()
        )
        dataloader = torch.utils.data.DataLoader(ds, batch_size=batch_size)
        adapter = TorchDataLoaderAdapter(dataloader)
        if implements_len and batch_size:
            # Length and batch size known -> full bookkeeping available.
            self.assertEqual(adapter.num_batches, math.ceil(10 / batch_size))
            self.assertEqual(adapter.batch_size, batch_size)
            self.assertEqual(adapter.has_partial_batch, True)
            self.assertEqual(adapter.partial_batch_size, 10 % batch_size)
        elif implements_len:
            # Length known but no batching -> one "batch" per sample.
            self.assertEqual(adapter.num_batches, 10)
            self.assertEqual(adapter.batch_size, None)
            self.assertEqual(adapter.has_partial_batch, None)
            self.assertEqual(adapter.partial_batch_size, None)
        else:
            # No `__len__` -> everything size-related is unknown.
            self.assertIsNone(adapter.num_batches)
            self.assertEqual(adapter.batch_size, batch_size)
            self.assertIsNone(adapter.has_partial_batch)
            self.assertIsNone(adapter.partial_batch_size)
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        batch_count = 0
        for i, batch in enumerate(it):
            batch_count += 1
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if batch_size:
                # Batched samples are stacked, adding a leading dimension.
                if i < 3:
                    self.assertEqual(bx.shape, (batch_size, 16, 4))
                    self.assertEqual(by.shape, (batch_size, 16, 2))
                else:
                    self.assertEqual(bx.shape, (10 % batch_size, 16, 4))
                    self.assertEqual(by.shape, (10 % batch_size, 16, 2))
            else:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
        if batch_size:
            self.assertEqual(batch_count, math.ceil(10 / batch_size))
        else:
            self.assertEqual(batch_count, 10)

    def test_with_different_shapes(self):
        """Per-batch varying feature widths are passed through unchanged."""
        x = (
            [np.ones([4], "float32")] * 16
            + [np.ones([5], "float32")] * 16
            + [np.ones([6], "float32")] * 2
        )
        y = np.ones((34, 2), "float32")
        ds = torch.utils.data.StackDataset(x, y)
        dataloader = torch.utils.data.DataLoader(ds, batch_size=16)
        adapter = TorchDataLoaderAdapter(dataloader)
        self.assertEqual(adapter.num_batches, 3)
        self.assertEqual(adapter.batch_size, 16)
        self.assertEqual(adapter.has_partial_batch, True)
        self.assertEqual(adapter.partial_batch_size, 2)
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        else:
            it = adapter.get_numpy_iterator()
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            # Feature width grows per batch: 4, then 5, then 6.
            if i == 0:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            elif i == 1:
                self.assertEqual(bx.shape, (16, 5))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 6))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(named_product(num_workers=[0, 2]))
    def test_builtin_prefetch(self, num_workers):
        """Built-in prefetch is advertised only with worker processes."""
        x = torch.normal(2, 3, size=(34, 4))
        y = torch.normal(1, 3, size=(34, 2))
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(
            ds, batch_size=16, num_workers=num_workers
        )
        adapter = TorchDataLoaderAdapter(dataloader)
        if num_workers > 0:
            self.assertTrue(adapter.builtin_prefetch)
        else:
            self.assertFalse(adapter.builtin_prefetch)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/__init__.py | keras/src/trainers/data_adapters/__init__.py | import types
from keras.src.distribution import distribution_lib
from keras.src.trainers.data_adapters import array_data_adapter
from keras.src.trainers.data_adapters import data_adapter
from keras.src.trainers.data_adapters import py_dataset_adapter
from keras.src.trainers.data_adapters.array_data_adapter import ArrayDataAdapter
from keras.src.trainers.data_adapters.generator_data_adapter import (
GeneratorDataAdapter,
)
from keras.src.trainers.data_adapters.grain_dataset_adapter import (
GrainDatasetAdapter,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDatasetAdapter
from keras.src.trainers.data_adapters.tf_dataset_adapter import TFDatasetAdapter
from keras.src.trainers.data_adapters.torch_data_loader_adapter import (
TorchDataLoaderAdapter,
)
def get_data_adapter(
    x,
    y=None,
    sample_weight=None,
    batch_size=None,
    steps_per_epoch=None,
    shuffle=False,
    class_weight=None,
):
    """Return a `DataAdapter` able to iterate over `x`.

    Args:
        x: Input data. One of: array-likes, a `tf.data.Dataset`, a Keras
            `PyDataset`, a torch `DataLoader`, a grain dataset/loader, a
            Python generator, or an already-built `DataAdapter`.
        y: Targets. Only supported for array-like `x`; other input types
            must bundle targets inside `x`.
        sample_weight: Sample weights. Only supported for array-like `x`.
        batch_size: Batch size to use. Only applies to array-like `x`.
        steps_per_epoch: Number of steps. Only applies to array-like `x`.
        shuffle: Whether to shuffle. Applies to array-like `x` and
            `PyDataset`.
        class_weight: Optional dict mapping class index to weight.

    Returns:
        A `DataAdapter` instance wrapping `x`.

    Raises:
        ValueError: If `x` is of an unrecognized type, or an argument is
            unsupported for the given input type.
    """
    # Allow passing a custom data adapter.
    if isinstance(x, data_adapter.DataAdapter):
        return x

    # Check for multi-process/worker distribution. Since only tf.dataset
    # is supported at the moment, we will raise error if the inputs fail
    # the type check
    distribution = distribution_lib.distribution()
    if (
        distribution is not None
        and getattr(distribution, "_is_multi_process", False)
        and getattr(distribution, "auto_shard_dataset", False)
        and not is_tf_dataset(x)
    ):
        raise ValueError(
            "When using a multi-worker distribution with auto-sharding "
            "enabled, the data must be provided as a `tf.data.Dataset` "
            f"instance. Received: type(x)={type(x)}. "
            "If the dataset is already sharded across workers, then set "
            "`distribution.auto_shard_dataset = False`."
        )

    if array_data_adapter.can_convert_arrays((x, y, sample_weight)):
        return ArrayDataAdapter(
            x,
            y,
            sample_weight=sample_weight,
            class_weight=class_weight,
            shuffle=shuffle,
            batch_size=batch_size,
            steps=steps_per_epoch,
        )
    elif is_tf_dataset(x):
        # Unsupported args: y, sample_weight, shuffle
        if y is not None:
            raise_unsupported_arg("y", "the targets", "tf.data.Dataset")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "tf.data.Dataset"
            )
        return TFDatasetAdapter(
            x, class_weight=class_weight, distribution=distribution
        )
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a tf.data.Dataset. The Dataset is "
        #     "expected to already be shuffled "
        #     "(via `.shuffle(tf.data.AUTOTUNE)`)"
        # )
    elif isinstance(x, py_dataset_adapter.PyDataset):
        if y is not None:
            raise_unsupported_arg("y", "the targets", "PyDataset")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "PyDataset"
            )
        return PyDatasetAdapter(x, class_weight=class_weight, shuffle=shuffle)
        # TODO: should we warn or not?
        # if x.num_batches is None and shuffle:
        #     warnings.warn(
        #         "`shuffle=True` was passed, but will be ignored since the "
        #         "data `x` was provided as a infinite PyDataset. The "
        #         "PyDataset is expected to already be shuffled."
        #     )
    elif is_torch_dataloader(x):
        if y is not None:
            raise_unsupported_arg("y", "the targets", "torch DataLoader")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "torch DataLoader"
            )
        if class_weight is not None:
            raise ValueError(
                "Argument `class_weight` is not supported for torch "
                f"DataLoader inputs. You can modify your `__getitem__ ` method"
                " to return input tensor, label and class_weight. "
                "Alternatively, use a custom training loop. See the User Guide "
                "https://keras.io/guides/custom_train_step_in_torch/"
                "#supporting-sampleweight-amp-classweight for more details. "
                f"Received: class_weight={class_weight}"
            )
        return TorchDataLoaderAdapter(x)
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a torch DataLoader. The DataLoader "
        #     "is expected to already be shuffled."
        # )
    elif is_grain_dataset(x):
        if y is not None:
            raise_unsupported_arg(
                "y", "the targets", "grain.Dataset and grain.DataLoader"
            )
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights",
                "the sample weights",
                "grain.Dataset and grain.DataLoader",
            )
        if class_weight is not None:
            raise ValueError(
                "Argument `class_weight` is not supported for grain.Dataset "
                f"and grain.DataLoader inputs. You can modify your "
                "`__getitem__ ` method to return input tensor, label and "
                "class_weight. "
                f"Received: class_weight={class_weight}"
            )
        return GrainDatasetAdapter(x)
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a grain dataset. The grain dataset "
        #     "is expected to already be shuffled."
        # )
    elif isinstance(x, types.GeneratorType):
        # Fix: these errors previously claimed `x` was a "PyDataset"
        # (copy-paste from the branch above); report "generator" instead.
        if y is not None:
            raise_unsupported_arg("y", "the targets", "generator")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "generator"
            )
        if class_weight is not None:
            raise ValueError(
                "Argument `class_weight` is not supported for Python "
                f"generator inputs. Received: class_weight={class_weight}"
            )
        return GeneratorDataAdapter(x)
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a generator. The generator "
        #     "is expected to yield already-shuffled data."
        # )
    else:
        raise ValueError(f"Unrecognized data type: x={x} (of type {type(x)})")
def raise_unsupported_arg(arg_name, arg_description, input_type):
    """Raise a `ValueError` stating that `arg_name` is unsupported.

    Used when `x` is a self-contained input type (dataset, loader, ...)
    that must carry the targets/weights itself.
    """
    message = (
        f"When providing `x` as a {input_type}, `{arg_name}` "
        f"should not be passed. Instead, {arg_description} should "
        f"be included as part of the {input_type}."
    )
    raise ValueError(message)
def is_tf_dataset(x):
    """Duck-typed check for `tf.data` datasets, without importing TF.

    Walks the MRO looking for a TensorFlow dataset base class, so the
    check works even when TensorFlow is not a direct dependency.
    """
    tf_dataset_names = (
        "DatasetV2",
        "DistributedDataset",
        "DistributedDatasetsFromFunction",
    )
    if not hasattr(x, "__class__"):
        return False
    return any(
        parent.__name__ in tf_dataset_names
        and "tensorflow.python." in str(parent.__module__)
        for parent in x.__class__.__mro__
    )
def is_torch_dataloader(x):
    """Duck-typed check for `torch.utils.data.DataLoader` instances.

    Walks the MRO so the check works without importing torch.
    """
    if not hasattr(x, "__class__"):
        return False
    return any(
        parent.__name__ == "DataLoader"
        and "torch.utils.data" in str(parent.__module__)
        for parent in x.__class__.__mro__
    )
def is_grain_dataset(x):
    """Duck-typed check for grain datasets/loaders, without importing grain.

    Matches any MRO entry named like a grain dataset class whose module
    path mentions "grain".
    """
    grain_class_names = ("MapDataset", "IterDataset", "DataLoader")
    if not hasattr(x, "__class__"):
        return False
    return any(
        parent.__name__ in grain_class_names
        and "grain" in str(parent.__module__)
        for parent in x.__class__.__mro__
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
class DataAdapter:
    """Base class for input data adapters.

    The purpose of a DataAdapter is to provide a unified interface to
    iterate over input data provided in a variety of formats -- such as
    NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
    """

    def get_numpy_iterator(self):
        """Get a Python iterable for the `DataAdapter`, that yields NumPy
        arrays.

        Returns:
            A Python iterator.
        """
        raise NotImplementedError

    def get_tf_dataset(self):
        """Get a `tf.data.Dataset` instance for the DataAdapter.

        Note that the dataset returned does not repeat for epoch, so caller
        might need to create new iterator for the same dataset at the
        beginning of the epoch. This behavior might change in the future.

        Returns:
            A `tf.data.Dataset`. Caller might use the dataset in different
            context, e.g. iter(dataset) in eager to get the value directly, or
            in graph mode, provide the iterator tensor to Keras model function.
        """
        raise NotImplementedError

    def get_jax_iterator(self):
        """Get a Python iterable for the `DataAdapter`, that yields arrays
        that can be fed to JAX. NumPy arrays are preferred for performance.

        Returns:
            A Python iterator.
        """
        raise NotImplementedError

    def get_torch_dataloader(self):
        """Get a Torch `DataLoader` for the `DataAdapter`.

        Returns:
            A Torch `DataLoader`.
        """
        raise NotImplementedError

    @property
    def builtin_prefetch(self):
        """Whether the DataAdapter has built-in prefetching capabilities.

        Prefetching is an optimization technique where data is loaded and
        prepared in advance while the model is processing the current batch,
        reducing training time by overlapping data loading with computation.

        Returns:
            bool: True if the DataAdapter implements its own prefetching
            mechanism and handles data loading asynchronously. False if the
            caller should implement prefetching externally.
        """
        # Subclasses with native prefetching (e.g. tf.data, torch workers)
        # override this to return True.
        return False

    @property
    def num_batches(self):
        """Return the size (number of batches) for the dataset created.

        For certain type of the data input, the number of batches is known,
        eg for Numpy data, the size is same as (number_of_element /
        batch_size). Whereas for dataset or python generator, the size is
        unknown since it may or may not have an end state.

        Returns:
            int, the number of batches for the dataset, or None if it is
            unknown. The caller could use this to control the loop of
            training, show progress bar, or handle unexpected StopIteration
            error.
        """
        raise NotImplementedError

    @property
    def batch_size(self):
        """Return the batch size of the dataset created.

        For certain type of the data input, the batch size is known, and even
        required, like numpy array. Whereas for dataset, the batch is unknown
        unless we take a peek.

        Returns:
            int, the batch size of the dataset, or None if it is unknown.
        """
        raise NotImplementedError

    @property
    def has_partial_batch(self):
        """Whether the dataset has partial batch at the end."""
        raise NotImplementedError

    @property
    def partial_batch_size(self):
        """The size of the final partial batch for dataset.

        Will return None if has_partial_batch is False or batch_size is None.
        """
        raise NotImplementedError

    def on_epoch_begin(self):
        """A hook called before each epoch."""
        pass

    def on_epoch_end(self):
        """A hook called after each epoch."""
        pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/array_data_adapter.py | keras/src/trainers/data_adapters/array_data_adapter.py | import functools
import math
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import array_slicing
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class ArrayDataAdapter(DataAdapter):
"""Adapter for array-like objects, e.g. TF/JAX Tensors, NumPy arrays."""
def __init__(
    self,
    x,
    y=None,
    sample_weight=None,
    batch_size=None,
    steps=None,
    shuffle=False,
    class_weight=None,
):
    """Validate array inputs and precompute batching bookkeeping.

    Args:
        x: Array-like inputs (possibly a nested structure).
        y: Optional array-like targets (possibly a nested structure).
        sample_weight: Optional sample weights; either one array per
            output in `y`, or a single sample-wise array that is
            replicated across outputs.
        batch_size: Batch size; defaults to `ceil(num_samples / steps)`
            when `steps` is given, else 32.
        steps: Optional number of steps per epoch, used to derive the
            batch size when `batch_size` is not given.
        shuffle: False, True, or "batch" (shuffle within batches only).
        class_weight: Optional dict mapping class index to weight;
            mutually exclusive with `sample_weight` and only valid for a
            single (non-nested) output.

    Raises:
        ValueError: If inputs are not array-like, if `sample_weight` and
            `class_weight` are both given, if the `sample_weight`
            structure does not match `y`, or if `class_weight` is used
            with nested targets.
    """
    if not can_convert_arrays((x, y, sample_weight)):
        raise ValueError(
            "Expected all elements of `x` to be array-like. "
            f"Received invalid types: x={x}"
        )
    if sample_weight is not None:
        if class_weight is not None:
            raise ValueError(
                "You cannot `class_weight` and `sample_weight` "
                "at the same time."
            )
        if tree.is_nested(y):
            if isinstance(sample_weight, (list, tuple, dict)):
                # Structured sample weights must mirror `y` exactly.
                try:
                    tree.assert_same_structure(y, sample_weight)
                except ValueError:
                    raise ValueError(
                        "You should provide one `sample_weight` array per "
                        "output in `y`. The two structures did not match:\n"
                        f"- y: {y}\n"
                        f"- sample_weight: {sample_weight}\n"
                    )
            else:
                # A single array is only allowed if it is sample-wise,
                # i.e. shape (num_samples,) or (num_samples, 1).
                is_samplewise = len(sample_weight.shape) == 1 or (
                    len(sample_weight.shape) == 2
                    and sample_weight.shape[1] == 1
                )
                if not is_samplewise:
                    raise ValueError(
                        "For a model with multiple outputs, when providing "
                        "a single `sample_weight` array, it should only "
                        "have one scalar score per sample "
                        "(i.e. shape `(num_samples,)`). If you want to use "
                        "non-scalar sample weights, pass a `sample_weight` "
                        "argument with one array per model output."
                    )
                # Replicate the same sample_weight array on all outputs.
                sample_weight = tree.map_structure(
                    lambda _: sample_weight, y
                )
    if class_weight is not None:
        if tree.is_nested(y):
            raise ValueError(
                "`class_weight` is only supported for Models with a single "
                "output."
            )
        # Convert the class-weight mapping into per-sample weights.
        sample_weight = data_adapter_utils.class_weight_to_sample_weights(
            y, class_weight
        )
    inputs = data_adapter_utils.pack_x_y_sample_weight(x, y, sample_weight)

    data_adapter_utils.check_data_cardinality(inputs)
    # All arrays share the same first dimension; take it as sample count.
    num_samples = set(
        i.shape[0] for i in tree.flatten(inputs) if i is not None
    ).pop()
    self._num_samples = num_samples
    self._inputs = inputs

    # If batch_size is not passed but steps is, calculate from the input
    # data. Defaults to `32` for backwards compatibility.
    if not batch_size:
        batch_size = int(math.ceil(num_samples / steps)) if steps else 32
    self._size = int(math.ceil(num_samples / batch_size))
    self._batch_size = batch_size
    self._partial_batch_size = num_samples % batch_size
    self._shuffle = shuffle
def get_numpy_iterator(self):
    """Return an iterator yielding batches as NumPy arrays."""
    sliceables = array_slicing.convert_to_sliceable(
        self._inputs, target_backend="numpy"
    )

    def to_numpy_batch(sliceable, indices=None):
        # Slice first, then convert the batch to a NumPy array.
        return sliceable.convert_to_numpy(sliceable[indices])

    return self._get_iterator(to_numpy_batch, sliceables)
def get_tf_dataset(self):
    """Build a `tf.data.Dataset` yielding (optionally shuffled) batches."""
    from keras.src.utils.module_utils import tensorflow as tf

    shuffle = self._shuffle
    batch_size = self._batch_size
    num_samples = self._num_samples
    num_full_batches = int(self._num_samples // batch_size)

    # Vectorized version of shuffle.
    # This is a performance improvement over using `from_tensor_slices`.
    # The indices of the data are shuffled and batched, and these indices
    # are then zipped with the data and used to extract a batch of the data
    # at each step. The performance improvements here come from:
    # 1. vectorized batch using gather
    # 2. parallelized map
    # 3. pipelined permutation generation
    # 4. optimized permutation batching
    # 5. disabled static optimizations

    indices_dataset = tf.data.Dataset.range(1)

    def permutation(_):
        # It turns out to be more performant to make a new set of indices
        # rather than reusing the same range Tensor. (presumably because of
        # buffer forwarding.)
        indices = tf.range(num_samples, dtype=tf.int64)
        if shuffle and shuffle != "batch":
            indices = tf.random.shuffle(indices)
        return indices

    # We prefetch a single element. Computing large permutations can take
    # quite a while so we don't want to wait for prefetching over an epoch
    # boundary to trigger the next permutation. On the other hand, too many
    # simultaneous shuffles can contend on a hardware level and degrade all
    # performance.
    indices_dataset = indices_dataset.map(permutation).prefetch(1)

    def slice_batch_indices(indices):
        """Convert a Tensor of indices into a dataset of batched indices.

        This step can be accomplished in several ways. The most natural is
        to slice the Tensor in a Dataset map. (With a condition on the upper
        index to handle the partial batch.) However it turns out that
        coercing the Tensor into a shape which is divisible by the batch
        size (and handling the last partial batch separately) allows for a
        much more favorable memory access pattern and improved performance.

        Args:
            indices: Tensor which determines the data order for an entire
                epoch.

        Returns:
            A Dataset of batched indices.
        """
        num_in_full_batch = num_full_batches * batch_size
        first_k_indices = tf.slice(indices, [0], [num_in_full_batch])
        first_k_indices = tf.reshape(
            first_k_indices, [num_full_batches, batch_size]
        )
        flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices)
        if self._partial_batch_size:
            # Append the trailing partial batch of indices separately.
            index_remainder = tf.data.Dataset.from_tensors(
                tf.slice(
                    indices, [num_in_full_batch], [self._partial_batch_size]
                )
            )
            flat_dataset = flat_dataset.concatenate(index_remainder)
        return flat_dataset

    def slice_inputs(indices_dataset, inputs):
        """Slice inputs into a Dataset of batches.

        Given a Dataset of batch indices and the unsliced inputs,
        this step slices the inputs in a parallelized fashion
        and produces a dataset of input batches.

        Args:
            indices_dataset: A Dataset of batched indices.
            inputs: A python data structure that contains the inputs,
                targets, and possibly sample weights.

        Returns:
            A Dataset of input batches matching the batch indices.
        """
        # NOTE(review): the `inputs` parameter is shadowed here; slicing
        # always operates on `self._inputs` — confirm this is intentional.
        inputs = array_slicing.convert_to_sliceable(
            self._inputs, target_backend="tensorflow"
        )
        inputs = tree.lists_to_tuples(inputs)
        dataset = tf.data.Dataset.zip(
            (indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())
        )

        def grab_batch(i, data):
            def grab_one(x):
                # Sparse tensors need a dedicated slicing helper.
                if isinstance(x, array_slicing.TensorflowSparseWrapper):
                    return array_slicing.slice_tensorflow_sparse_wrapper(
                        x, i
                    )
                # Containers are traversed into; return None to recurse.
                if isinstance(x, (list, tuple, dict)):
                    return None
                if tf.is_tensor(x):
                    return tf.gather(x, i, axis=0)
                return x

            return tree.traverse(grab_one, data)

        dataset = dataset.map(
            grab_batch, num_parallel_calls=tf.data.AUTOTUNE
        )

        # Default optimizations are disabled to avoid the overhead of
        # (unnecessary) input pipeline graph serialization & deserialization
        options = tf.data.Options()
        options.experimental_optimization.apply_default_optimizations = (
            False
        )
        if self._shuffle:
            options.experimental_external_state_policy = (
                tf.data.experimental.ExternalStatePolicy.IGNORE
            )
        dataset = dataset.with_options(options)
        return dataset

    indices_dataset = indices_dataset.flat_map(slice_batch_indices)
    if shuffle == "batch":
        # "batch" mode: shuffle only within each batch of indices.
        indices_dataset = indices_dataset.map(tf.random.shuffle)
    dataset = slice_inputs(indices_dataset, self._inputs)
    # Shard by data (not by file) when distributed.
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = (
        tf.data.experimental.AutoShardPolicy.DATA
    )
    dataset = dataset.with_options(options)
    return dataset.prefetch(tf.data.AUTOTUNE)
def get_jax_iterator(self):
inputs = array_slicing.convert_to_sliceable(
self._inputs, target_backend="jax"
)
def slice_and_convert_to_jax(sliceable, indices=None):
x = sliceable[indices]
x = sliceable.convert_to_jax_compatible(x)
return x
return self._get_iterator(slice_and_convert_to_jax, inputs)
    def get_torch_dataloader(self):
        """Return a torch `DataLoader` yielding batches of torch tensors.

        Batching is done by the sampler (whole batches of indices at a
        time), so the `DataLoader` itself does not collate anything.
        """
        import torch

        from keras.src.backend.torch.core import convert_to_tensor

        class ArrayDataset(torch.utils.data.Dataset):
            # Wraps the structure of sliceables. `__getitems__` receives a
            # whole list of sample indices and returns a complete batch in
            # the expected structure.
            def __init__(self, array):
                self.array = array

            def __getitems__(self, indices):
                def slice_and_convert(sliceable):
                    x = sliceable[indices]
                    x = sliceable.convert_to_torch_compatible(x)
                    x = convert_to_tensor(x)
                    return x

                return tree.map_structure(
                    slice_and_convert, self.array, none_is_leaf=False
                )

            def __len__(self):
                return len(self.array[0])

        class RandomBatchSampler(torch.utils.data.Sampler):
            # Implements the "batch" shuffle mode: batch boundaries stay
            # fixed, only the order of samples within each batch is
            # permuted.
            def __init__(self, sampler):
                self.sampler = sampler

            def __iter__(self):
                for batch in self.sampler:
                    yield [batch[i] for i in torch.randperm(len(batch))]

            def __len__(self):
                return len(self.sampler)

        if self._shuffle == "batch":
            batch_sampler = RandomBatchSampler(
                torch.utils.data.BatchSampler(
                    range(self._num_samples),
                    batch_size=self._batch_size,
                    drop_last=False,
                )
            )
        elif self._shuffle:
            # Full sample-level shuffling.
            batch_sampler = torch.utils.data.BatchSampler(
                torch.utils.data.RandomSampler(range(self._num_samples)),
                batch_size=self._batch_size,
                drop_last=False,
            )
        else:
            batch_sampler = torch.utils.data.BatchSampler(
                torch.utils.data.SequentialSampler(range(self._num_samples)),
                batch_size=self._batch_size,
                drop_last=False,
            )

        # Because ArrayDataset.__getitems__ returns full batches organized in
        # the expected structure, there is nothing to collate.
        def no_op_collate(batch):
            return batch

        inputs = array_slicing.convert_to_sliceable(
            self._inputs, target_backend="torch"
        )
        dataset = ArrayDataset(inputs)
        return torch.utils.data.DataLoader(
            dataset, batch_sampler=batch_sampler, collate_fn=no_op_collate
        )
def _get_iterator(self, slice_and_convert_fn, inputs):
global_permutation = None
if self._shuffle and self._shuffle != "batch":
global_permutation = np.random.permutation(self._num_samples)
for i in range(self._size):
start = i * self._batch_size
stop = min((i + 1) * self._batch_size, self._num_samples)
if self._shuffle == "batch":
indices = np.random.permutation(stop - start) + start
elif self._shuffle:
indices = global_permutation[start:stop]
else:
indices = slice(start, stop)
slice_indices_and_convert_fn = functools.partial(
slice_and_convert_fn, indices=indices
)
yield tree.map_structure(
slice_indices_and_convert_fn, inputs, none_is_leaf=False
)
    @property
    def num_batches(self):
        # Total number of batches per epoch, including any final partial
        # batch.
        return self._size
    @property
    def batch_size(self):
        # Number of samples per full batch.
        return self._batch_size
    @property
    def has_partial_batch(self):
        # Whether the last batch of the epoch is smaller than `batch_size`.
        return self._partial_batch_size > 0
    @property
    def partial_batch_size(self):
        # Size of the final partial batch, or `None` when every batch is
        # full (`self._partial_batch_size` is 0 in that case).
        return self._partial_batch_size or None
def can_convert_arrays(arrays):
    """Check if array-like inputs can be handled by `ArrayDataAdapter`.

    Args:
        arrays: Structure of `Tensor`s, NumPy arrays, or tensor-like.

    Returns:
        `True` if every leaf of `arrays` is sliceable, and the structure can
        therefore be handled by `ArrayDataAdapter`, `False` otherwise.
    """
    sliceable_flags = tree.map_structure(array_slicing.can_slice_array, arrays)
    return all(tree.flatten(sliceable_flags))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/grain_dataset_adapter.py | keras/src/trainers/data_adapters/grain_dataset_adapter.py | import itertools
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
from keras.src.utils.module_utils import grain
from keras.src.utils.module_utils import tensorflow as tf
class GrainDatasetAdapter(DataAdapter):
    """Adapter that handles `grain.DataLoader`, `grain.MapDataset` and
    `grain.IterDataset`.
    """

    def __init__(self, dataset):
        """Initialize the GrainDatasetAdapter.

        Args:
            dataset: A Grain dataset instance. Must be one of
                `grain.DataLoader`, `grain.MapDataset`, or `grain.IterDataset`.
        """
        if not isinstance(
            dataset, (grain.MapDataset, grain.IterDataset, grain.DataLoader)
        ):
            raise ValueError(
                "Expected `dataset` to be a grain.MapDataset, "
                "grain.IterDataset or grain.DataLoader. "
                f"Received: {dataset} of type {type(dataset)}"
            )
        self._dataset = dataset
        batch_size, output_signature = self._get_dataset_info(dataset)
        self._batch_size = batch_size
        self._output_signature = output_signature
        # Lazily computed from `_output_signature` in `get_tf_dataset`.
        self._output_tf_signature = None

    def _get_dataset_info(self, dataset):
        """Get the `batch_size` and `output_signature` from the dataset.

        We use a small list of batches to infer the `batch_size` and
        `output_signature`.
        """
        batches = list(
            itertools.islice(
                dataset, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
            )
        )
        output_signature = data_adapter_utils.get_keras_tensor_spec(batches)
        flat_output_signature = tree.flatten(output_signature)
        # The batch size is the leading dimension of the first leaf; it may
        # be `None` when the inspected batches disagree on the dimension.
        batch_size = flat_output_signature[0].shape[0]
        if batch_size is not None:
            batch_size = int(batch_size)
        return batch_size, output_signature

    def get_numpy_iterator(self):
        # Returns a Grain dataset/loader whose elements have all leaves
        # converted to NumPy (or passed through for shared-memory metadata).
        from grain._src.python.shared_memory_array import (
            SharedMemoryArrayMetadata,
        )

        def convert_to_numpy(x):
            if isinstance(x, (np.ndarray, SharedMemoryArrayMetadata)):
                return x
            else:
                # Using `__array__` should handle `tf.Tensor`, `jax.np.ndarray`,
                # `torch.Tensor`, as well as any other tensor-like object that
                # has added numpy support.
                if hasattr(x, "__array__"):
                    if data_adapter_utils.is_torch_tensor(x):
                        # Torch tensors must be on the CPU before `np.asarray`.
                        x = x.cpu()
                    x = np.asarray(x)
                return x

        class ConvertToNumpy(grain.transforms.Map):
            def map(self, x):
                return tree.map_structure(
                    convert_to_numpy, x, none_is_leaf=False
                )

        if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
            dataset = self._dataset.map(ConvertToNumpy())
        else:
            # `DataLoader` has no `map`; rebuild it with the conversion
            # appended to its operations, preserving its configuration.
            # NOTE(review): this reads grain-private attributes
            # (`_data_source`, `_sampler`, ...) — verify against the pinned
            # grain version.
            # Instantiate a new `DataLoader`.
            dataset = grain.DataLoader(
                data_source=self._dataset._data_source,
                sampler=self._dataset._sampler,
                # Append `ConvertToNumpy`.
                operations=list(self._dataset._operations) + [ConvertToNumpy()],
                worker_count=self._dataset._multiprocessing_options.num_workers,
                worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
                shard_options=self._dataset._shard_options,
                read_options=self._dataset._read_options,
                enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
            )
        return dataset

    def get_jax_iterator(self):
        # Same strategy as `get_numpy_iterator`, but converting sparse
        # leaves to JAX-compatible sparse arrays.
        def convert_to_jax_compatible(x):
            if data_adapter_utils.is_scipy_sparse(x):
                x = data_adapter_utils.scipy_sparse_to_jax_sparse(x)
            elif data_adapter_utils.is_tensorflow_sparse(x):
                x = data_adapter_utils.tf_sparse_to_jax_sparse(x)
            return x

        class ConvertToJaxCompatible(grain.transforms.Map):
            def map(self, x):
                return tree.map_structure(
                    convert_to_jax_compatible, x, none_is_leaf=False
                )

        if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
            dataset = self._dataset.map(ConvertToJaxCompatible())
        else:
            # Instantiate a new `DataLoader`.
            dataset = grain.DataLoader(
                data_source=self._dataset._data_source,
                sampler=self._dataset._sampler,
                # Append `ConvertToJaxCompatible`.
                operations=list(self._dataset._operations)
                + [ConvertToJaxCompatible()],
                worker_count=self._dataset._multiprocessing_options.num_workers,
                worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
                shard_options=self._dataset._shard_options,
                read_options=self._dataset._read_options,
                enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
            )
        return dataset

    def get_tf_dataset(self):
        # Wraps the Grain dataset in a `tf.data.Dataset` via
        # `from_generator`, converting sparse leaves and lists first.
        def convert_to_tf(x):
            if x is None:
                # `None` leaves become empty tf optionals.
                return tf.experimental.Optional.empty(None)
            if data_adapter_utils.is_scipy_sparse(x):
                x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
            elif data_adapter_utils.is_jax_sparse(x):
                x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
            return x

        class ConvertToTF(grain.transforms.Map):
            def map(self, x):
                return tree.map_structure(convert_to_tf, x)

        # `tf.data.Dataset.from_generator` does not support lists as output.
        # We convert lists to tuples.
        class ListToTuple(grain.transforms.Map):
            def map(self, x):
                return tree.lists_to_tuples(x)

        if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
            dataset = self._dataset.map(ConvertToTF())
            dataset = dataset.map(ListToTuple())
        else:
            # Instantiate a new `DataLoader`.
            dataset = grain.DataLoader(
                data_source=self._dataset._data_source,
                sampler=self._dataset._sampler,
                # Append `ConvertToTF` and `ListToTuple`.
                operations=list(self._dataset._operations)
                + [ConvertToTF(), ListToTuple()],
                worker_count=self._dataset._multiprocessing_options.num_workers,
                worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
                shard_options=self._dataset._shard_options,
                read_options=self._dataset._read_options,
                enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
            )
        if self._output_tf_signature is None:
            # Cache the tf signature; it is derived once from the Keras
            # tensor spec inferred in `_get_dataset_info`.
            self._output_tf_signature = tree.map_structure(
                data_adapter_utils.convert_to_tf_tensor_spec,
                self._output_signature,
            )
        return tf.data.Dataset.from_generator(
            lambda: dataset, output_signature=self._output_tf_signature
        )

    def get_torch_dataloader(self):
        import torch.utils.data as torch_data

        class ConverterIterableDataset(torch_data.IterableDataset):
            def __init__(self, iterable):
                super().__init__()
                self.iterable = iterable

            def __iter__(self):
                return iter(self.iterable)

        # `batch_size=None` indicates that we should not re-batch
        return torch_data.DataLoader(
            ConverterIterableDataset(self._dataset), batch_size=None
        )

    @property
    def builtin_prefetch(self):
        # Grain pipelines prefetch internally; Keras should not add its own.
        return True

    @property
    def num_batches(self):
        # Unknown ahead of time for Grain datasets.
        return None

    @property
    def batch_size(self):
        # Batch size inferred from the first inspected batches (may be None).
        return self._batch_size

    @property
    def has_partial_batch(self):
        # Unknown for Grain datasets.
        return None

    @property
    def partial_batch_size(self):
        # Unknown for Grain datasets.
        return None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/data_adapter_utils.py | keras/src/trainers/data_adapters/data_adapter_utils.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
NUM_BATCHES_FOR_TENSOR_SPEC = 2
@keras_export("keras.utils.unpack_x_y_sample_weight")
def unpack_x_y_sample_weight(data):
    """Unpacks user-provided data tuple.

    This is a convenience utility to be used when overriding
    `Model.train_step`, `Model.test_step`, or `Model.predict_step`.
    This utility makes it easy to support data of the form `(x,)`,
    `(x, y)`, or `(x, y, sample_weight)`.

    Example:

    >>> features_batch = ops.ones((10, 5))
    >>> labels_batch = ops.zeros((10, 5))
    >>> data = (features_batch, labels_batch)
    >>> # `y` and `sample_weight` will default to `None` if not provided.
    >>> x, y, sample_weight = unpack_x_y_sample_weight(data)
    >>> sample_weight is None
    True

    Args:
        data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.

    Returns:
        The unpacked tuple, with `None`s for `y` and `sample_weight` if they are
        not provided.
    """
    if isinstance(data, list):
        data = tuple(data)
    if not isinstance(data, tuple):
        # Bare `x`: no targets, no sample weights.
        return (data, None, None)
    if 1 <= len(data) <= 3:
        # Pad the tuple with `None` up to the (x, y, sample_weight) triplet.
        return data + (None,) * (3 - len(data))
    raise ValueError(
        "Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
        f"or `(x, y, sample_weight)`, found: {data}"
    )
@keras_export("keras.utils.pack_x_y_sample_weight")
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
    """Packs user-provided data into a tuple.

    This is a convenience utility for packing data into the tuple formats
    that `Model.fit()` uses.

    Example:

    >>> x = ops.ones((10, 1))
    >>> data = pack_x_y_sample_weight(x)
    >>> isinstance(data, ops.Tensor)
    True
    >>> y = ops.ones((10, 1))
    >>> data = pack_x_y_sample_weight(x, y)
    >>> isinstance(data, tuple)
    True
    >>> x, y = data

    Args:
        x: Features to pass to `Model`.
        y: Ground-truth targets to pass to `Model`.
        sample_weight: Sample weight for each element.

    Returns:
        Tuple in the format used in `Model.fit()`.
    """
    if y is None:
        # For single x-input, we do no tuple wrapping since in this case
        # there is no ambiguity. This also makes NumPy and Dataset
        # consistent in that the user does not have to wrap their Dataset
        # data in an unnecessary tuple.
        return (x,) if isinstance(x, (tuple, list)) else x
    if sample_weight is None:
        return (x, y)
    return (x, y, sample_weight)
def list_to_tuple(maybe_list):
    """Datasets will stack any list of tensors, so we convert them to tuples."""
    return tuple(maybe_list) if isinstance(maybe_list, list) else maybe_list
def check_data_cardinality(data):
    """Raise `ValueError` if arrays in `data` disagree on sample count.

    Args:
        data: Structure (typically `(x, y, sample_weight)`) whose non-`None`
            leaves all expose a leading `shape[0]` dimension.
    """
    cardinalities = set(
        int(leaf.shape[0]) for leaf in tree.flatten(data) if leaf is not None
    )
    if len(cardinalities) > 1:
        msg = (
            "Data cardinality is ambiguous. "
            "Make sure all arrays contain the same number of samples."
        )
        # Report the sizes seen for each of x, y and sample_weight.
        details = [
            f"'{label}' sizes: "
            + ", ".join(str(leaf.shape[0]) for leaf in tree.flatten(part))
            + "\n"
            for label, part in zip(["x", "y", "sample_weight"], data)
        ]
        raise ValueError(msg + "".join(details))
def class_weight_to_sample_weights(y, class_weight):
    """Build a per-sample weight vector from a class-to-weight mapping.

    Args:
        y: Labels; either class indices or one-hot/probability vectors.
        class_weight: Dict mapping class index to weight. Classes absent
            from the dict get weight 1.0.

    Returns:
        A 1-D NumPy array of dtype `backend.floatx()` with one weight per
        sample.
    """
    # Convert to numpy to ensure consistent handling of operations
    # (e.g., np.round()) across frameworks like TensorFlow, JAX, and PyTorch.
    labels = ops.convert_to_numpy(y)
    if len(labels.shape) > 1:
        if labels.shape[-1] == 1:
            # Trailing singleton dimension: squeeze it away.
            labels = np.squeeze(labels, axis=-1)
        else:
            # One-hot (or probability) labels: reduce to class indices.
            labels = np.argmax(labels, axis=-1)
    labels = np.round(labels).astype("int32")
    weights = [class_weight.get(int(label), 1.0) for label in labels]
    return np.asarray(weights, dtype=backend.floatx())
def get_keras_tensor_spec(batches):
    """Return the KerasTensor spec for a list of batches.

    The spec is represented using `KerasTensor` which could handle dense,
    sparse or ragged tensors.

    Args:
        batches: list of structures of tensors. The structures must be
            identical, but the shape at each leaf may be different.

    Returns:
        A nested structure of `KerasTensor`.
    """

    def get_single_tensor_spec(*tensors):
        # `tensors` holds the corresponding leaf from each inspected batch.
        x = tensors[0]
        if not hasattr(x, "shape"):
            # Try to convert to a numpy array.
            x = np.array(x)
        rank = len(x.shape)
        if rank < 1:
            raise ValueError(
                "When passing a dataset to a Keras model, the arrays must "
                f"be at least rank 1. Received: {x} of rank {len(x.shape)}."
            )
        for t in tensors:
            if len(t.shape) != rank:
                raise ValueError(
                    "When passing a dataset to a Keras model, the "
                    "corresponding arrays in each batch must have the same "
                    f"rank. Received: {x} and {t}"
                )
        shape = []
        # Merge shapes: go through each dimension one by one and keep the
        # common values
        for dims in zip(*[list(x.shape) for x in tensors]):
            dims_set = set(dims)
            # A dimension that differs across batches becomes `None`.
            shape.append(dims_set.pop() if len(dims_set) == 1 else None)
        dtype = backend.standardize_dtype(x.dtype)
        if is_tensorflow_ragged(x):
            return backend.KerasTensor(
                shape=shape,
                dtype=dtype,
                ragged=True,
                ragged_rank=x.ragged_rank,
                row_splits_dtype=x.row_splits.dtype,
            )
        if is_tensorflow_sparse(x) or is_scipy_sparse(x) or is_jax_sparse(x):
            return backend.KerasTensor(shape=shape, dtype=dtype, sparse=True)
        else:
            return backend.KerasTensor(shape=shape, dtype=dtype)

    return tree.map_structure(
        get_single_tensor_spec, *batches, none_is_leaf=False
    )
def convert_to_tf_tensor_spec(keras_tensor, batch_axis_to_none=True):
    """Convert a KerasTensor to a TensorSpec.

    Args:
        keras_tensor: A KerasTensor instance.
        batch_axis_to_none: If `True`, the batch axis of the returned
            tensor spec will be set to None. Defaults to `True`.
    """
    from keras.src.utils.module_utils import tensorflow as tf

    if keras_tensor is None:
        # `None` leaves are represented as empty optionals on the tf side.
        return tf.OptionalSpec(None)
    if not isinstance(keras_tensor, backend.KerasTensor):
        raise TypeError(
            f"Expected a KerasTensor, but got {keras_tensor} of type "
            f"{type(keras_tensor)}."
        )
    spec_shape = list(keras_tensor.shape)
    if batch_axis_to_none:
        spec_shape[0] = None
    if keras_tensor.ragged:
        return tf.RaggedTensorSpec(
            shape=spec_shape,
            dtype=keras_tensor.dtype,
            ragged_rank=keras_tensor.ragged_rank,
            row_splits_dtype=keras_tensor.row_splits_dtype,
        )
    if keras_tensor.sparse:
        return tf.SparseTensorSpec(shape=spec_shape, dtype=keras_tensor.dtype)
    return tf.TensorSpec(shape=spec_shape, dtype=keras_tensor.dtype)
def get_tensor_spec(batches):
    """Return the common tensor spec for a list of batches.

    The spec is represented using `tf.TensorSpec`, `tf.SparseTensorSpec` and
    `tf.RaggedTensorSpec`.

    Args:
        batches: list of structures of tensors. The structures must be
            identical, but the shape at each leaf may be different.

    Returns:
        A common tensor spec.
    """
    keras_specs = get_keras_tensor_spec(batches)
    return tree.map_structure(convert_to_tf_tensor_spec, keras_specs)
def get_jax_iterator(iterable):
    """Yield batches from `iterable` with every leaf converted for JAX."""
    import jax
    import jax.experimental.sparse as jax_sparse

    def convert_to_jax_compatible(x):
        # JAX arrays (dense or sparse) and NumPy arrays pass through.
        if isinstance(x, (jax.Array, jax_sparse.JAXSparse, np.ndarray)):
            return x
        if is_scipy_sparse(x):
            return scipy_sparse_to_jax_sparse(x)
        if is_tensorflow_sparse(x):
            return tf_sparse_to_jax_sparse(x)
        # Anything else is coerced through NumPy.
        return np.asarray(x)

    for batch in iterable:
        yield tree.map_structure(
            convert_to_jax_compatible, batch, none_is_leaf=False
        )
def get_numpy_iterator(iterable):
    """Yield batches from `iterable` with every leaf converted to NumPy."""

    def convert_to_numpy(x):
        if isinstance(x, np.ndarray):
            return x
        # Using `__array__` should handle `tf.Tensor`, `jax.np.ndarray`,
        # `torch.Tensor`, as well as any other tensor-like object that
        # has added numpy support.
        if hasattr(x, "__array__"):
            if is_torch_tensor(x):
                # Torch tensors must be moved to host memory first.
                x = x.cpu()
            return np.asarray(x)
        return x

    for batch in iterable:
        yield tree.map_structure(convert_to_numpy, batch, none_is_leaf=False)
def get_torch_dataloader(iterable):
    """Wrap an iterable of batches in a torch `DataLoader` (no re-batching)."""
    import torch.utils.data as torch_data

    from keras.src.backend.torch.core import convert_to_tensor

    class ConverterIterableDataset(torch_data.IterableDataset):
        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __iter__(self):
            for batch in self.wrapped:
                yield tree.map_structure(
                    convert_to_tensor, batch, none_is_leaf=False
                )

    # `batch_size=None` tells the DataLoader not to re-batch.
    return torch_data.DataLoader(
        ConverterIterableDataset(iterable), batch_size=None
    )
def is_tensorflow_tensor(value):
    """Return `True` if `value` is a TensorFlow tensor.

    Covers `tf.Tensor` subclasses as well as `tf.RaggedTensor` and
    `tf.SparseTensor` (which are not `tf.Tensor` subclasses), without
    importing TensorFlow.
    """
    if hasattr(value, "__class__"):
        if value.__class__.__name__ in ("RaggedTensor", "SparseTensor"):
            return "tensorflow.python." in str(value.__class__.__module__)
        for parent in value.__class__.__mro__:
            # Bug fix: the previous check was `parent.__name__ in ("Tensor")`.
            # `("Tensor")` is the string "Tensor" (not a tuple), so `in`
            # performed a substring test and matched unrelated classes whose
            # names are substrings of "Tensor" (e.g. "so", "en"). An exact
            # name comparison is required.
            if parent.__name__ == "Tensor" and "tensorflow.python." in str(
                parent.__module__
            ):
                return True
    return False
def is_tensorflow_ragged(value):
    """Return `True` if `value` is a `tf.RaggedTensor` (without importing TF)."""
    cls = getattr(value, "__class__", None)
    return (
        cls is not None
        and cls.__name__ == "RaggedTensor"
        and "tensorflow.python." in str(cls.__module__)
    )
def is_tensorflow_sparse(value):
    """Return `True` if `value` is a `tf.SparseTensor` (without importing TF)."""
    cls = getattr(value, "__class__", None)
    return (
        cls is not None
        and cls.__name__ == "SparseTensor"
        and "tensorflow.python." in str(cls.__module__)
    )
def is_jax_array(value):
    """Return `True` if `value` is a JAX array (dense or sparse)."""
    cls = getattr(value, "__class__", None)
    if cls is not None and any(
        parent.__name__ == "Array" and str(parent.__module__) == "jax"
        for parent in cls.__mro__
    ):
        return True
    # JAX sparse arrays do not extend jax.Array.
    return is_jax_sparse(value)
def is_jax_sparse(value):
    """Return `True` if `value` is a JAX sparse array (without importing JAX)."""
    cls = getattr(value, "__class__", None)
    if cls is None:
        return False
    return str(cls.__module__).startswith("jax.experimental.sparse")
def is_torch_tensor(value):
    """Return `True` if `value` is a torch tensor (without importing torch)."""
    if not hasattr(value, "__class__"):
        return False
    return any(
        parent.__name__ == "Tensor"
        and str(parent.__module__).endswith("torch")
        for parent in value.__class__.__mro__
    )
def is_scipy_sparse(x):
    """Return `True` if `x` is a SciPy sparse matrix/array."""
    module = str(x.__class__.__module__)
    return module.startswith("scipy.sparse") and hasattr(x, "tocoo")
def scipy_sparse_to_tf_sparse(x):
    """Convert a SciPy sparse matrix to a `tf.SparseTensor`."""
    from keras.src.utils.module_utils import tensorflow as tf

    coo = x.tocoo()
    # Stack (row, col) pairs into the (nnz, 2) indices layout tf expects.
    indices = np.stack((coo.row, coo.col), axis=1)
    return tf.SparseTensor(indices, coo.data, coo.shape)
def scipy_sparse_to_jax_sparse(x):
    """Convert a SciPy sparse matrix to a `jax.experimental.sparse.BCOO`.

    The conversion is pinned to a CPU device so no accelerator memory is
    consumed.
    """
    import jax
    import jax.experimental.sparse as jax_sparse

    cpu_device = jax.local_devices(backend="cpu")[0]
    with jax.default_device(cpu_device):
        return jax_sparse.BCOO.from_scipy_sparse(x)
def tf_sparse_to_jax_sparse(x):
    """Convert a `tf.SparseTensor` to a `jax.experimental.sparse.BCOO`.

    The conversion is pinned to a CPU device so no accelerator memory is
    consumed.
    """
    import jax
    import jax.experimental.sparse as jax_sparse

    data = np.asarray(x.values)
    coords = np.asarray(x.indices)
    cpu_device = jax.local_devices(backend="cpu")[0]
    with jax.default_device(cpu_device):
        return jax_sparse.BCOO((data, coords), shape=x.shape)
def jax_sparse_to_tf_sparse(x):
    """Convert a JAX `BCOO` sparse array to a `tf.SparseTensor`."""
    from keras.src.utils.module_utils import tensorflow as tf

    # BCOO stores its coordinates in `indices` and values in `data`,
    # matching the `tf.SparseTensor` constructor order.
    indices = x.indices
    values = x.data
    return tf.SparseTensor(indices, values, x.shape)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/generator_data_adapter_test.py | keras/src/trainers/data_adapters/generator_data_adapter_test.py | import math
import jax
import jax.experimental.sparse as jax_sparse
import numpy as np
import pytest
import scipy
import tensorflow as tf
import torch
from absl.testing import parameterized
from jax import numpy as jnp
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import generator_data_adapter
def example_generator(x, y, sample_weight=None, batch_size=32):
    """Return a factory producing a generator of `(x, y[, sw])` batches."""

    def make():
        num_batches = math.ceil(len(x) / batch_size)
        for b in range(num_batches):
            start = b * batch_size
            end = min(start + batch_size, len(x))
            if sample_weight is None:
                yield x[start:end], y[start:end]
            else:
                yield x[start:end], y[start:end], sample_weight[start:end]

    return make
class GeneratorDataAdapterTest(testing.TestCase):
    """Tests for `GeneratorDataAdapter` across backends and tensor types."""

    @parameterized.named_parameters(
        named_product(
            [
                {"testcase_name": "use_weight", "use_sample_weight": True},
                {"testcase_name": "no_weight", "use_sample_weight": False},
            ],
            generator_type=["np", "tf", "jax", "torch"],
        )
    )
    def test_basic_flow(self, use_sample_weight, generator_type):
        # End-to-end check: feed a generator of (x, y[, sw]) batches and
        # verify the backend-native iterator yields correctly typed/shaped
        # batches in the original sample order.
        x = np.random.random((34, 4)).astype("float32")
        y = np.array([[i, i] for i in range(34)], dtype="float32")
        sw = np.random.random((34,)).astype("float32")
        if generator_type == "tf":
            x, y, sw = tf.constant(x), tf.constant(y), tf.constant(sw)
        elif generator_type == "jax":
            x, y, sw = jnp.array(x), jnp.array(y), jnp.array(sw)
        elif generator_type == "torch":
            x, y, sw = (
                torch.as_tensor(x),
                torch.as_tensor(y),
                torch.as_tensor(sw),
            )
        if not use_sample_weight:
            sw = None
        make_generator = example_generator(
            x,
            y,
            sample_weight=sw,
            batch_size=16,
        )
        adapter = generator_data_adapter.GeneratorDataAdapter(make_generator())
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            # Non-JAX inputs are converted to NumPy on the JAX backend.
            expected_class = (
                jax.Array if generator_type == "jax" else np.ndarray
            )
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        sample_order = []
        for i, batch in enumerate(it):
            if use_sample_weight:
                self.assertEqual(len(batch), 3)
                bx, by, bsw = batch
            else:
                self.assertEqual(len(batch), 2)
                bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            # 34 samples with batch_size 16 -> two full batches then a
            # partial batch of 2.
            if i < 2:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 4))
                self.assertEqual(by.shape, (2, 2))
            if use_sample_weight:
                self.assertIsInstance(bsw, expected_class)
            for j in range(by.shape[0]):
                sample_order.append(by[j, 0])
        self.assertAllClose(sample_order, list(range(34)))

    def test_with_different_shapes(self):
        # Batches may legitimately differ in both batch size and feature
        # size; the adapter must pass them through unchanged.
        def generator():
            yield np.ones([16, 4], "float32"), np.ones([16, 2], "float32")
            yield np.ones([16, 5], "float32"), np.ones([16, 2], "float32")
            yield np.ones([2, 6], "float32"), np.ones([2, 2], "float32")

        adapter = generator_data_adapter.GeneratorDataAdapter(generator())
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        else:
            it = adapter.get_numpy_iterator()
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i == 0:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            elif i == 1:
                self.assertEqual(bx.shape, (16, 5))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 6))
                self.assertEqual(by.shape, (2, 2))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="tf.data.Dataset specific behavior",
    )
    def test_with_unexpected_shapes(self):
        # The spec is inferred from the first two batches; a third batch
        # incompatible with that spec must raise.
        def generator():
            yield np.ones([16, 4], "float32"), np.ones([16, 2], "float32")
            yield np.ones([16, 5], "float32"), np.ones([16, 2], "float32")
            yield np.ones([16, 6], "float32"), np.ones([16, 3], "float32")

        adapter = generator_data_adapter.GeneratorDataAdapter(generator())
        it = iter(adapter.get_tf_dataset())
        next(it)
        next(it)
        # note that Tensorflow wraps the TypeError in an InvalidArgumentError.
        with self.assertRaisesRegex(
            tf.errors.InvalidArgumentError,
            "TypeError:.* shape \\(16, 3\\).* shape \\(None, 2\\) was expected"
            ".*first two batches",
        ):
            next(it)

    @parameterized.named_parameters(
        named_product(generator_type=["tf", "jax", "scipy"])
    )
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors",
    )
    def test_sparse_tensors(self, generator_type):
        # Sparse inputs from any framework must come out as the backend's
        # native sparse type.
        if generator_type == "tf":
            x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 4))
            y = tf.SparseTensor([[0, 0], [1, 1]], [3.0, 4.0], (2, 2))
        elif generator_type == "jax":
            x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 4))
            y = jax_sparse.BCOO(([3.0, 4.0], [[0, 0], [1, 1]]), shape=(2, 2))
        elif generator_type == "scipy":
            x = scipy.sparse.coo_matrix(([1.0, 2.0], ([0, 1], [0, 2])), (2, 4))
            y = scipy.sparse.coo_matrix(([3.0, 4.0], ([0, 1], [0, 1])), (2, 2))

        def generate():
            for _ in range(4):
                yield x, y

        adapter = generator_data_adapter.GeneratorDataAdapter(generate())
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.SparseTensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = jax_sparse.BCOO
        for batch in it:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.shape, (2, 4))
            self.assertEqual(by.shape, (2, 2))

    @pytest.mark.skipif(
        not backend.SUPPORTS_RAGGED_TENSORS,
        reason="Backend does not support ragged tensors",
    )
    def test_ragged_tensors(self):
        # Ragged inputs must keep their raggedness through the adapter.
        x = tf.ragged.constant(
            [[[0.0, 1.0]], [[2.0, 3.0], [4.0, 5.0]]], ragged_rank=1
        )
        y = tf.ragged.constant(
            [[[0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], ragged_rank=1
        )

        def generate():
            for _ in range(4):
                yield x, y

        adapter = generator_data_adapter.GeneratorDataAdapter(generate())
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.RaggedTensor
        for batch in it:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.shape, (2, None, 2))
            self.assertEqual(by.shape, (2, None, 2))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/trainers/data_adapters/data_adapter_utils_test.py | keras/src/trainers/data_adapters/data_adapter_utils_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.trainers.data_adapters.data_adapter_utils import (
class_weight_to_sample_weights,
)
class TestClassWeightToSampleWeights(testing.TestCase):
    """Tests for `class_weight_to_sample_weights` with various label formats
    and tensor frameworks."""

    @parameterized.named_parameters(
        [
            # Simple case, where y is flat
            (
                "simple_class_labels",
                np.array([0, 1, 0, 2]),
                {0: 1.0, 1: 2.0, 2: 3.0},
                np.array([1.0, 2.0, 1.0, 3.0]),
            ),
            # Testing with one-hot encoded labels,
            # so basically the argmax statement
            (
                "one_hot_encoded_labels",
                np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]),
                {0: 1.0, 1: 2.0, 2: 3.0},
                np.array([1.0, 2.0, 1.0, 3.0]),
            ),
            # 3 is not mapped, so it's assigned the default weight (1)
            (
                "unmapped_class",
                np.array([0, 3, 0, 2]),
                {0: 1.0, 1: 2.0, 2: 3.0},
                np.array([1.0, 1.0, 1.0, 3.0]),
            ),
            # Trailing singleton dimension is squeezed, not argmax'ed.
            (
                "multi_dimensional_input",
                np.array([[0], [1], [0], [2]]),
                {0: 1.0, 1: 2.0, 2: 3.0},
                np.array([1.0, 2.0, 1.0, 3.0]),
            ),
            # An empty mapping falls back to weight 1.0 for every sample.
            (
                "all_unmapped",
                np.array([0, 1, 0, 2]),
                {},
                np.array([1.0, 1.0, 1.0, 1.0]),
            ),
        ]
    )
    def test_class_weight_to_sample_weights(self, y, class_weight, expected):
        self.assertAllClose(
            class_weight_to_sample_weights(y, class_weight), expected
        )

    @pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
    def test_class_weight_to_sample_weights_torch_specific(self):
        # Torch tensors must be handled identically to NumPy input.
        import torch

        y = torch.from_numpy(np.array([0, 1, 0, 2]))
        self.assertAllClose(
            class_weight_to_sample_weights(y, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )
        y_one_hot = torch.from_numpy(
            np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
        )
        self.assertAllClose(
            class_weight_to_sample_weights(y_one_hot, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )

    @pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
    def test_class_weight_to_sample_weights_jax_specific(self):
        # JAX arrays must be handled identically to NumPy input.
        import jax

        y = jax.numpy.asarray(np.array([0, 1, 0, 2]))
        self.assertAllClose(
            class_weight_to_sample_weights(y, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )
        y_one_hot = jax.numpy.asarray(
            np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
        )
        self.assertAllClose(
            class_weight_to_sample_weights(y_one_hot, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )

    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="tensorflow only"
    )
    def test_class_weight_to_sample_weights_tf_specific(self):
        # TensorFlow tensors must be handled identically to NumPy input.
        import tensorflow as tf

        y = tf.convert_to_tensor(np.array([0, 1, 0, 2]))
        self.assertAllClose(
            class_weight_to_sample_weights(y, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )
        y_one_hot = tf.convert_to_tensor(
            np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
        )
        self.assertAllClose(
            class_weight_to_sample_weights(y_one_hot, {0: 1.0, 1: 2.0, 2: 3.0}),
            np.array([1.0, 2.0, 1.0, 3.0]),
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/variable_mapping_test.py | keras/src/models/variable_mapping_test.py | import numpy as np
from keras.src import testing
from keras.src.saving import saving_lib_test
class VariableMappingTest(testing.TestCase):
def test_basics(self):
model = saving_lib_test._get_basic_functional_model()
model.optimizer.build(model.trainable_variables)
variable_map = model._get_variable_map()
self.assertIn("first_dense/kernel", variable_map)
self.assertIn("second_dense/bias", variable_map)
self.assertIn("adam/learning_rate", variable_map)
model = saving_lib_test._get_basic_sequential_model()
model.build((None, 1))
model.optimizer.build(model.trainable_variables)
variable_map = model._get_variable_map()
self.assertIn("sequential/dense_1/bias", variable_map)
self.assertIn("adam/learning_rate", variable_map)
model = saving_lib_test._get_subclassed_model()
model(np.ones((1, 1)))
model.optimizer.build(model.trainable_variables)
variable_map = model._get_variable_map()
self.assertIn("custom_model_x/my_dense_1/dense/kernel", variable_map)
self.assertIn("custom_model_x/my_dense_1/my_dict_weight", variable_map)
self.assertIn(
"custom_model_x/my_dense_1/my_additional_weight", variable_map
)
self.assertIn("adam/learning_rate", variable_map)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/functional_test.py | keras/src/models/functional_test.py | import os
import warnings
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import applications
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import saving
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.dtype_policies import dtype_policy
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.models import Functional
from keras.src.models import Model
from keras.src.models import Sequential
from keras.src.models.model import model_from_json
class FunctionalTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_flow_multi_input(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional([input_a, input_b], outputs, name="basic")
model.summary()
self.assertEqual(model.name, "basic")
self.assertIsInstance(model, Functional)
self.assertIsInstance(model, Model)
# Eager call
in_val = [np.random.random((2, 3)), np.random.random((2, 3))]
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2, name="input_a_2")
input_b_2 = Input(shape=(3,), batch_size=2, name="input_b_2")
in_val = [input_a_2, input_b_2]
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_scalar_input(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(), batch_size=2, name="input_b")
outputs = input_a + input_b[:, None]
model = Functional([input_a, input_b], outputs)
model.summary()
in_val = [np.zeros((2, 3)), np.ones((2,))]
out_val = model(in_val)
self.assertAllClose(out_val, np.ones((2, 3)))
@pytest.mark.requires_trainable_backend
def test_mutable_state(self):
inputs = Input(shape=(3,), batch_size=2, name="input")
x = layers.Dense(5)(inputs)
outputs = layers.Dense(5)(x)
model = Functional(inputs, outputs)
# Allow attaching state to a model that isn't directly part of the DAG.
# Most useful for functional subclasses.
model.extra_layer = layers.Dense(5)
@pytest.mark.requires_trainable_backend
def test_basic_flow_multi_output(self):
inputs = Input(shape=(3,), batch_size=2, name="input")
x = layers.Dense(5)(inputs)
output_a = layers.Dense(4)(x)
output_b = layers.Dense(5)(x)
model = Functional(inputs, [output_a, output_b])
# Eager call
in_val = np.random.random((2, 3))
out_val = model(in_val)
self.assertIsInstance(out_val, list)
self.assertEqual(len(out_val), 2)
self.assertEqual(out_val[0].shape, (2, 4))
self.assertEqual(out_val[1].shape, (2, 5))
# Symbolic call
out_val = model(Input(shape=(3,), batch_size=2))
self.assertIsInstance(out_val, list)
self.assertEqual(len(out_val), 2)
self.assertEqual(out_val[0].shape, (2, 4))
self.assertEqual(out_val[1].shape, (2, 5))
@pytest.mark.requires_trainable_backend
def test_basic_flow_dict_io(self):
input_a = Input(shape=(3,), batch_size=2, name="a")
input_b = Input(shape=(3,), batch_size=2, name="b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
with self.assertRaisesRegex(
ValueError, "All `inputs` values must be KerasTensors"
):
model = Functional({"a": "input_a", "b": input_b}, outputs)
with self.assertRaisesRegex(
ValueError, "All `outputs` values must be KerasTensors"
):
model = Functional({"a": input_a, "b": input_b}, "outputs")
model = Functional({"a": input_a, "b": input_b}, outputs)
# Eager call
in_val = {"a": np.random.random((2, 3)), "b": np.random.random((2, 3))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(3,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
def test_basic_flow_as_a_submodel(self):
# Build submodel
submodel_inputs = Input([4])
submodel_outputs = layers.Flatten()(submodel_inputs)
submodel = Model(submodel_inputs, submodel_outputs)
inputs = Input((None, 4))
outputs = layers.TimeDistributed(submodel)(inputs)
model = Model(inputs=inputs, outputs=outputs)
x = np.random.random((2, 3, 4))
y = model(x)
self.assertEqual(y.shape, (2, 3, 4))
@pytest.mark.requires_trainable_backend
def test_named_input_dict_io(self):
# Single input
input_a = Input(shape=(3,), batch_size=2, name="a")
x = layers.Dense(5)(input_a)
outputs = layers.Dense(4)(x)
model = Functional(input_a, outputs)
# Eager call
in_val = {"a": np.random.random((2, 3))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
in_val = {"a": input_a_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# ----
# Two inputs, input is list
input_a = Input(shape=(3,), batch_size=2, name="a")
input_b = Input(shape=(4,), batch_size=2, name="b")
a = layers.Dense(5)(input_a)
b = layers.Dense(5)(input_b)
x = layers.Concatenate()([a, b])
outputs = layers.Dense(4)(x)
model = Functional([input_a, input_b], outputs)
# Eager call
in_val = {"a": np.random.random((2, 3)), "b": np.random.random((2, 4))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(4,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# ----
# Two inputs, input is dict
model = Functional({"a": input_a, "b": input_b}, outputs)
# Eager call
in_val = {"a": np.random.random((2, 3)), "b": np.random.random((2, 4))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(4,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# ----
# Two inputs, input is dict with incorrect names
model = Functional({"c": input_a, "d": input_b}, outputs)
# Eager call
in_val = {"c": np.random.random((2, 3)), "d": np.random.random((2, 4))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(4,), batch_size=2)
in_val = {"c": input_a_2, "d": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Now we can't use the input names:
with self.assertRaises(ValueError):
in_val = {
"a": np.random.random((2, 3)),
"b": np.random.random((2, 4)),
}
out_val = model(in_val)
@pytest.mark.requires_trainable_backend
def test_input_dict_with_extra_field(self):
input_a = Input(shape=(3,), batch_size=2, name="a")
x = input_a * 5
outputs = x + 2
model = Functional({"a": input_a}, outputs)
with pytest.warns() as record:
# Eager call
in_val = {
"a": np.random.random((2, 3)),
"b": np.random.random((2, 1)),
}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 3))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(1,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 3))
self.assertLen(record, 1)
self.assertStartsWith(
str(record[0].message),
r"The structure of `inputs` doesn't match the expected structure",
)
@parameterized.named_parameters(
("list", list),
("tuple", tuple),
("dict", dict),
)
def test_restored_multi_output_type(self, out_type):
inputs = Input(shape=(3,), batch_size=2, name="input")
x = layers.Dense(5)(inputs)
output_a = layers.Dense(4)(x)
output_b = layers.Dense(5)(x)
if out_type is dict:
outputs = {"a": output_a, "b": output_b}
else:
outputs = out_type([output_a, output_b])
model = Functional(inputs, outputs)
model_restored = Functional.from_config(model.get_config())
# Eager call
in_val = np.random.random((2, 3))
out_val = model_restored(in_val)
self.assertIsInstance(out_val, out_type)
# Symbolic call
out_val = model_restored(Input(shape=(3,), batch_size=2))
self.assertIsInstance(out_val, out_type)
def test_restored_nested_input(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
x = layers.Dense(5)(input_a)
outputs = layers.Dense(4)(x)
model = Functional([[input_a]], outputs)
# Serialize and deserialize the model
json_config = model.to_json()
restored_json_config = model_from_json(json_config).to_json()
# Check that the serialized model is the same as the original
self.assertEqual(json_config, restored_json_config)
def test_functional_input_shape_and_type(self):
input = layers.Input((1024, 4))
conv = layers.Conv1D(32, 3)(input)
model = Functional(input, conv)
self.assertIsInstance(model.input, KerasTensor)
self.assertEqual(model.input_shape, (None, 1024, 4))
@pytest.mark.requires_trainable_backend
def test_layer_getters(self):
# Test mixing ops and layers
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5, name="dense_1")(x)
outputs = layers.Dense(4, name="dense_2")(x)
model = Functional([input_a, input_b], outputs)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model._operations), 5)
self.assertEqual(model.get_layer(index=0).name, "input_a")
self.assertEqual(model.get_layer(index=1).name, "input_b")
self.assertEqual(model.get_layer(index=2).name, "dense_1")
self.assertEqual(model.get_layer(index=3).name, "dense_2")
self.assertEqual(model.get_layer(name="dense_1").name, "dense_1")
@pytest.mark.requires_trainable_backend
def test_training_arg(self):
class Canary(layers.Layer):
def call(self, x, training=False):
assert training
return x
def compute_output_spec(self, x, training=False):
return backend.KerasTensor(x.shape, dtype=x.dtype)
inputs = Input(shape=(3,), batch_size=2)
outputs = Canary()(inputs)
model = Functional(inputs, outputs)
model(np.random.random((2, 3)), training=True)
def test_mask_arg(self):
# TODO
pass
@pytest.mark.requires_trainable_backend
def test_passing_inputs_by_name(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional([input_a, input_b], outputs)
# Eager call
in_val = {
"input_a": np.random.random((2, 3)),
"input_b": np.random.random((2, 3)),
}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2, name="input_a_2")
input_b_2 = Input(shape=(3,), batch_size=2, name="input_b_2")
in_val = {"input_a": input_a_2, "input_b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_rank_standardization(self):
# Downranking
inputs = Input(shape=(3,), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs)
out_val = model(np.random.random((2, 3, 1)))
self.assertEqual(out_val.shape, (2, 3))
# Upranking
inputs = Input(shape=(3, 1), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs)
out_val = model(np.random.random((2, 3)))
self.assertEqual(out_val.shape, (2, 3, 3))
@pytest.mark.requires_trainable_backend
def test_rank_standardization_failure(self):
# Simple input and rank too high
inputs = Input(shape=(3,), name="foo")
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs)
with self.assertRaisesRegex(ValueError, "name 'foo' .* path ''"):
model(np.random.random((2, 3, 4)))
# Deeply nested input and rank too low
inputs = [{"foo": Input(shape=(3,), name="my_input")}]
outputs = layers.Dense(3)(inputs[0]["foo"])
model = Functional(inputs, outputs)
with self.assertRaisesRegex(
ValueError, "name 'my_input' .* path '0.foo'"
):
model(np.random.random(()))
@pytest.mark.requires_trainable_backend
def test_dtype_standardization(self):
float_input = Input(shape=(2,), dtype="float16")
int_input = Input(shape=(2,), dtype="int32")
float_output = float_input + 2
int_output = int_input + 2
model = Functional((float_input, int_input), (float_output, int_output))
float_data, int_data = model((np.ones((2, 2)), np.ones((2, 2))))
self.assertEqual(backend.standardize_dtype(float_data.dtype), "float16")
self.assertEqual(backend.standardize_dtype(int_data.dtype), "int32")
@pytest.mark.requires_trainable_backend
def test_serialization(self):
# Test basic model
inputs = Input(shape=(3,), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs, trainable=False)
self.run_class_serialization_test(model)
# Test multi-io model
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
xa = layers.Dense(5, name="middle_a")(input_a)
xb = layers.Dense(5, name="middle_b")(input_b)
output_a = layers.Dense(4, name="output_a")(xa)
output_b = layers.Dense(4, name="output_b")(xb)
model = Functional(
[input_a, input_b], [output_a, output_b], name="func"
)
self.run_class_serialization_test(model)
# Test model that includes floating ops
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5, name="middle")(x)
output_a = layers.Dense(4, name="output_a")(x)
output_b = layers.Dense(4, name="output_b")(x)
model = Functional(
[input_a, input_b], [output_a, output_b], name="func"
)
self.run_class_serialization_test(model)
# Test model with dict i/o
input_a = Input(shape=(3,), batch_size=2, name="a")
input_b = Input(shape=(3,), batch_size=2, name="b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional({"a": input_a, "b": input_b}, outputs)
self.run_class_serialization_test(model)
@pytest.mark.requires_trainable_backend
def test_bad_input_spec(self):
# Single input
inputs = Input(shape=(4,))
outputs = layers.Dense(2)(inputs)
model = Functional(inputs, outputs)
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(ValueError, "expects 1 input"):
model([np.zeros((2, 4)), np.zeros((2, 4))])
# List input
input_a = Input(shape=(4,), name="a")
input_b = Input(shape=(4,), name="b")
x = input_a + input_b
outputs = layers.Dense(2)(x)
model = Functional([input_a, input_b], outputs)
with self.assertRaisesRegex(ValueError, "expects 2 input"):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model([np.zeros((2, 3)), np.zeros((2, 4))])
# Dict input
model = Functional({"a": input_a, "b": input_b}, outputs)
with self.assertRaisesRegex(ValueError, "expects 2 input"):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model({"a": np.zeros((2, 3)), "b": np.zeros((2, 4))})
@pytest.mark.requires_trainable_backend
def test_manual_input_spec(self):
inputs = Input(shape=(None, 3))
outputs = layers.Dense(2)(inputs)
model = Functional(inputs, outputs)
model.input_spec = InputSpec(shape=(None, 4, 3))
with self.assertRaisesRegex(
ValueError,
r"expected shape=\(None, 4, 3\), found shape=\(2, 3, 3\)",
):
model(np.zeros((2, 3, 3)))
model(np.zeros((2, 4, 3)))
def test_functional_slicing(self):
inputs = Input(shape=(None, 2), name="input")
x1 = layers.Dense(3, name="dense1")(inputs)
x2 = layers.Dense(4, name="dense2")(x1)
outputs = layers.Dense(5, name="dense3")(x2)
full_model = Functional(inputs, outputs, name="full_model")
self.assertLen(full_model.layers, 4)
partial_model_1 = Functional(x2, outputs, name="partial1")
self.assertLen(partial_model_1.layers, 2) # input_layer, dense3
self.assertIsInstance(partial_model_1.layers[0], layers.InputLayer)
self.assertEqual(partial_model_1.layers[1].name, "dense3")
partial_model_2 = Functional(x1, x2, name="partial2")
self.assertLen(partial_model_2.layers, 2) # input_layer, dense2
self.assertIsInstance(partial_model_2.layers[0], layers.InputLayer)
self.assertEqual(partial_model_2.layers[1].name, "dense2")
partial_model_3 = Functional(
full_model.get_layer("dense2").input, outputs, name="partial3"
)
self.assertLen(partial_model_3.layers, 3) # input_layer, dense2, dense3
self.assertIsInstance(partial_model_3.layers[0], layers.InputLayer)
self.assertEqual(partial_model_3.layers[1].name, "dense2")
self.assertEqual(partial_model_3.layers[2].name, "dense3")
partial_model_4 = Functional(
full_model.get_layer("dense1").input,
full_model.get_layer("dense2").output,
name="partial4",
)
self.assertLen(partial_model_4.layers, 3) # input_layer, dense1, dense2
self.assertIsInstance(partial_model_4.layers[0], layers.InputLayer)
self.assertEqual(partial_model_4.layers[1].name, "dense1")
self.assertEqual(partial_model_4.layers[2].name, "dense2")
def test_deeply_nested_model(self):
i1, i2, i3 = Input((1,)), Input((2,)), Input((3,))
o1, o2, o3 = (
layers.Dense(1)(i1),
layers.Dense(2)(i2),
layers.Dense(3)(i3),
)
model = Model(
{"1": i1, "others": {"2": i2, "3": i3}},
{"1": o1, "others": {"2": o2, "3": o3}},
)
out_eager = model(
{
"1": np.ones((2, 1)),
"others": {"2": np.ones((2, 2)), "3": np.ones((2, 3))},
}
)
out_symbolic = model(
{
"1": Input((1,), batch_size=2),
"others": {
"2": Input((2,), batch_size=2),
"3": Input((3,), batch_size=2),
},
}
)
for out in [out_eager, out_symbolic]:
self.assertIsInstance(out, dict)
self.assertEqual(set(out.keys()), {"1", "others"})
self.assertEqual(out["1"].shape, (2, 1))
self.assertIsInstance(out["others"], dict)
self.assertEqual(set(out["others"].keys()), {"2", "3"})
self.assertEqual(out["others"]["2"].shape, (2, 2))
self.assertEqual(out["others"]["3"].shape, (2, 3))
# Test serialization boundaries
temp_filepath = os.path.join(self.get_temp_dir(), "deeply_nested.keras")
model.save(temp_filepath)
loaded_model = saving.load_model(temp_filepath)
new_out_eager = loaded_model(
{
"1": np.ones((2, 1)),
"others": {"2": np.ones((2, 2)), "3": np.ones((2, 3))},
}
)
self.assertAllClose(out_eager["1"], new_out_eager["1"])
self.assertAllClose(
out_eager["others"]["2"], new_out_eager["others"]["2"]
)
self.assertAllClose(
out_eager["others"]["3"], new_out_eager["others"]["3"]
)
def test_optional_inputs(self):
class OptionalInputLayer(layers.Layer):
def call(self, x, y=None):
if y is not None:
return x + y
return x
def compute_output_shape(self, x_shape):
return x_shape
i1 = Input((2,))
i2 = Input((2,), optional=True)
outputs = OptionalInputLayer()(i1, i2)
model = Model([i1, i2], outputs)
# Eager test
out = model([np.ones((2, 2)), None])
self.assertAllClose(out, np.ones((2, 2)))
# Note: it's not intended to work in symbolic mode (yet).
def test_optional_dict_inputs(self):
class OptionalInputLayer(layers.Layer):
def call(self, x, y=None):
if y is not None:
return x + y
return x
def compute_output_shape(self, x_shape):
return x_shape
i1 = Input((2,), name="input1")
i2 = Input((2,), name="input2", optional=True)
outputs = OptionalInputLayer()(i1, i2)
model = Model({"input1": i1, "input2": i2}, outputs)
# Eager test
out = model({"input1": np.ones((2, 2)), "input2": None})
self.assertAllClose(out, np.ones((2, 2)))
# Note: it's not intended to work in symbolic mode (yet).
def test_warning_for_mismatched_inputs_structure(self):
def is_input_warning(w):
return str(w.message).startswith(
"The structure of `inputs` doesn't match the expected structure"
)
i1 = Input((2,))
i2 = Input((2,))
outputs = layers.Add()([i1, i2])
model = Model({"i1": i1, "i2": i2}, outputs)
with pytest.warns() as warning_logs:
model.predict([np.ones((2, 2)), np.zeros((2, 2))], verbose=0)
self.assertLen(list(filter(is_input_warning, warning_logs)), 1)
# No warning for mismatched tuples and lists.
model = Model([i1, i2], outputs)
with warnings.catch_warnings(record=True) as warning_logs:
model.predict((np.ones((2, 2)), np.zeros((2, 2))), verbose=0)
self.assertLen(list(filter(is_input_warning, warning_logs)), 0)
def test_for_functional_in_sequential(self):
# Test for a v3.4.1 regression.
if backend.image_data_format() == "channels_first":
image_size = (3, 100, 100)
else:
image_size = (100, 100, 3)
base_model = applications.mobilenet.MobileNet(
include_top=False, weights=None
)
model = Sequential()
model.add(layers.Input(shape=image_size))
model.add(base_model)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dense(7, activation="softmax"))
config = model.get_config()
model = Sequential.from_config(config)
def test_add_loss(self):
# TODO
pass
def test_layers_setter(self):
inputs = Input(shape=(3,), batch_size=2, name="input")
outputs = layers.Dense(5)(inputs)
model = Functional(inputs, outputs)
with self.assertRaisesRegex(
AttributeError, "`Model.layers` attribute is reserved"
):
model.layers = [layers.Dense(4)]
@pytest.mark.requires_trainable_backend
def test_dict_input_to_list_model(self):
vocabulary_size = 100
num_tags = 10
num_departments = 3
num_samples = 128
title = layers.Input(shape=(vocabulary_size,), name="title")
text_body = layers.Input(shape=(vocabulary_size,), name="text_body")
tags = layers.Input(shape=(num_tags,), name="tags")
features = layers.Concatenate()([title, text_body, tags])
features = layers.Dense(64, activation="relu")(features)
priority = layers.Dense(1, activation="sigmoid", name="priority")(
features
)
department = layers.Dense(
num_departments, activation="softmax", name="department"
)(features)
model = Functional(
inputs=[title, text_body, tags], outputs=[priority, department]
)
title_data = np.random.randint(
0, 2, size=(num_samples, vocabulary_size)
)
text_body_data = np.random.randint(
0, 2, size=(num_samples, vocabulary_size)
)
tags_data = np.random.randint(0, 2, size=(num_samples, num_tags))
priority_data = np.random.random(size=(num_samples, 1))
department_data = np.random.randint(
0, 2, size=(num_samples, num_departments)
)
# List style fit
model.compile(
optimizer="adam",
loss=["mean_squared_error", "categorical_crossentropy"],
metrics=[["mean_absolute_error"], ["accuracy"]],
)
model.fit(
[title_data, text_body_data, tags_data],
[priority_data, department_data],
epochs=1,
)
model.evaluate(
[title_data, text_body_data, tags_data],
[priority_data, department_data],
)
priority_preds, department_preds = model.predict(
[title_data, text_body_data, tags_data]
)
# Dict style fit
model.compile(
optimizer="adam",
loss={
"priority": "mean_squared_error",
"department": "categorical_crossentropy",
},
metrics={
"priority": ["mean_absolute_error"],
"department": ["accuracy"],
},
)
model.fit(
{
"title": title_data,
"text_body": text_body_data,
"tags": tags_data,
},
{"priority": priority_data, "department": department_data},
epochs=1,
)
model.evaluate(
{
"title": title_data,
"text_body": text_body_data,
"tags": tags_data,
},
{"priority": priority_data, "department": department_data},
)
priority_preds, department_preds = model.predict(
{
"title": title_data,
"text_body": text_body_data,
"tags": tags_data,
}
)
def test_list_input_with_dict_build(self):
x1 = Input((10,), name="IT")
x2 = Input((10,), name="IS")
y = layers.subtract([x1, x2])
model = Model(inputs={"IT": x1, "IS": x2}, outputs=y)
x1 = ops.ones((1, 10))
x2 = ops.zeros((1, 10))
# Works
_ = model({"IT": x1, "IS": x2})
with self.assertRaisesRegex(
ValueError,
"The structure of `inputs` doesn't match the expected structure",
):
model([x1, x2])
def test_functional_with_dtype_policy(self):
original_dtype_policy = dtype_policy.dtype_policy()
try:
dtype_policy.set_dtype_policy("mixed_float16")
inputs = Input((10,), name="input")
outputs = layers.Dense(5)(inputs)
model = Model(inputs=inputs, outputs=outputs)
# Verify that no cast node appears in the graph.
self.assertLen(model.operations, 2)
self.assertIsInstance(model.operations[0], layers.InputLayer)
self.assertIsInstance(model.operations[1], layers.Dense)
finally:
dtype_policy.set_dtype_policy(original_dtype_policy)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/cloning_test.py | keras/src/models/cloning_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.models.cloning import clone_model
def get_mlp_functional_model(shared_layers=False):
inputs = layers.Input(shape=(3,))
x = layers.Dense(2)(inputs)
if shared_layers:
layer = layers.Dense(2, name="shared")
x = layer(x)
x = layer(x)
outputs = layers.Dense(2)(x)
model = models.Model(inputs, outputs)
return model
def get_nested_functional_model():
inputs = layers.Input(shape=(4,))
x = layers.Dense(3)(inputs)
mlp = get_mlp_functional_model()
x = mlp(x)
outputs = layers.Dense(2)(x)
model = models.Model(inputs, outputs)
return model
def get_nested_sequential_model():
model = models.Sequential()
model.add(layers.Dense(2))
model.add(get_sequential_model(explicit_input=False))
model.add(layers.Dense(2))
return model
def get_cnn_functional_model(shared_layers=False):
inputs = layers.Input(shape=(7, 3))
x = layers.Conv1D(2, 2, padding="same")(inputs)
if shared_layers:
layer = layers.Conv1D(2, 2, padding="same", name="shared")
x = layer(x)
x = layer(x)
outputs = layers.Conv1D(2, 2, padding="same")(x)
model = models.Model(inputs, outputs)
return model
def get_sequential_model(explicit_input=True):
model = models.Sequential()
if explicit_input:
model.add(layers.Input(shape=(3,)))
model.add(layers.Dense(2))
model.add(layers.Dense(2))
return model
def get_cnn_sequential_model(explicit_input=True):
model = models.Sequential()
if explicit_input:
model.add(layers.Input(shape=(7, 3)))
model.add(layers.Conv1D(2, 2, padding="same"))
model.add(layers.Conv1D(2, 2, padding="same"))
return model
def get_subclassed_model():
class ExampleModel(models.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.d1 = layers.Dense(2)
self.d2 = layers.Dense(2)
def call(self, x):
return self.d2(self.d1(x))
return ExampleModel()
@pytest.mark.requires_trainable_backend
class CloneModelTest(testing.TestCase):
def assert_models_equal(self, model1, model2, ref_input):
result1 = model1(ref_input)
result2 = model2(ref_input)
for r1, r2 in zip(tree.flatten(result1), tree.flatten(result2)):
self.assertAllClose(
ops.convert_to_numpy(r1), ops.convert_to_numpy(r2)
)
def assert_weights_equal(self, model1, model2):
for a, b in zip(model1.weights, model2.weights):
self.assertAllClose(a.numpy(), b.numpy())
@parameterized.named_parameters(
("mlp_functional", get_mlp_functional_model),
("cnn_functional", get_cnn_functional_model, True),
("sequential", get_sequential_model),
(
"deferred_sequential",
lambda: get_sequential_model(explicit_input=False),
),
("subclassed", get_subclassed_model),
)
def test_cloning_correctness(self, model_fn, is_conv=False):
ref_input = np.random.random((2, 7, 3) if is_conv else (2, 3))
model = model_fn()
new_model = clone_model(model)
model(ref_input) # Maybe needed to build the model
new_model(ref_input) # Maybe needed to build the model
new_model.set_weights(model.get_weights())
self.assert_models_equal(model, new_model, ref_input)
@parameterized.named_parameters(
("mlp_functional", get_mlp_functional_model),
("cnn_functional", get_cnn_functional_model),
("sequential", get_sequential_model),
)
def test_custom_clone_function(self, model_fn):
def clone_function(layer):
config = layer.get_config()
config["name"] = f"{config['name']}_custom"
return layer.__class__.from_config(config)
model = model_fn()
new_model = clone_model(model, clone_function=clone_function)
for l1, l2 in zip(model.layers, new_model.layers):
if not isinstance(l1, layers.InputLayer):
self.assertEqual(l2.name, f"{l1.name}_custom")
@parameterized.named_parameters(
("cnn_functional", get_cnn_functional_model),
("cnn_sequential", get_cnn_sequential_model),
(
"cnn_sequential_noinputlayer",
lambda: get_cnn_sequential_model(explicit_input=False),
),
)
def test_input_tensors(self, model_fn):
ref_input = np.random.random((2, 7, 3))
model = model_fn()
model(ref_input) # Maybe needed to get model inputs if no Input layer
input_tensor = model.inputs[0]
new_model = clone_model(model, input_tensors=input_tensor)
tree.assert_same_structure(model.inputs, new_model.inputs)
tree.assert_same_structure(model.outputs, new_model.outputs)
def test_shared_layers_cloning(self):
model = get_mlp_functional_model(shared_layers=True)
new_model = clone_model(model)
self.assertLen(new_model.layers, 4)
def test_structured_io_cloning(self):
x = layers.Input((3,))
y = layers.Input((3,))
z1 = x + y
z2 = layers.Dense(5)(z1)
inputs = dict(x=x, y=y)
outputs = dict(z1=z1, z2=z2)
model0 = models.Model(inputs, outputs)
model = clone_model(model0)
tree.assert_same_structure(model.input, inputs)
tree.assert_same_structure(model.output, outputs)
model = clone_model(model0, input_tensors=inputs)
tree.assert_same_structure(model.input, inputs)
tree.assert_same_structure(model.output, outputs)
with self.assertRaisesRegex(
ValueError,
"`input_tensors` must have the same structure as model.input",
):
model = clone_model(model0, input_tensors=(x, y))
def test_call_fn(self):
model = get_mlp_functional_model(shared_layers=False)
def call_function(layer, *args, **kwargs):
out = layer(*args, **kwargs)
if isinstance(layer, layers.Dense):
out = layers.Dropout(0.5)(out)
return out
new_model = clone_model(
model,
clone_function=lambda x: x, # Reuse the same layers.
call_function=call_function,
)
self.assertLen(model.layers, 3)
self.assertLen(new_model.layers, 5)
self.assertIsInstance(new_model.layers[2], layers.Dropout)
self.assertIsInstance(new_model.layers[4], layers.Dropout)
ref_input = np.random.random((2, 3))
self.assert_models_equal(model, new_model, ref_input)
    def test_recursive(self):
        """Recursive cloning applies clone/call functions inside nested models."""
        model = get_nested_functional_model()

        def call_function(layer, *args, **kwargs):
            out = layer(*args, **kwargs)
            if isinstance(layer, layers.Dense):
                out = layers.Dropout(0.5)(out)
            return out

        new_model = clone_model(
            model,
            clone_function=lambda x: x,  # Reuse the same layers.
            call_function=call_function,
            recursive=True,
        )
        # Dropouts show up inside the nested model too.
        self.assertLen(model._flatten_layers(), 8)
        self.assertLen(new_model._flatten_layers(), 12)
        self.assertIsInstance(new_model.layers[3].layers[2], layers.Dropout)
        self.assertIsInstance(new_model.layers[3].layers[4], layers.Dropout)
        ref_input = np.random.random((2, 4))
        self.assert_models_equal(model, new_model, ref_input)

        # Sequential.
        def clone_function(layer):
            # Tag each clone so we can tell clones from originals below.
            layer = layer.__class__.from_config(layer.get_config())
            layer.flag = True
            return layer

        model = get_nested_sequential_model()
        new_model = clone_model(
            model,
            clone_function=clone_function,
            recursive=True,
        )
        ref_input = np.random.random((2, 3))
        model(ref_input)  # Maybe needed to build the model
        new_model(ref_input)  # Maybe needed to build the model
        new_model.set_weights(model.get_weights())
        self.assert_models_equal(model, new_model, ref_input)
        # Clones carry the flag; originals must not.
        for l1, l2 in zip(model._flatten_layers(), new_model._flatten_layers()):
            if isinstance(l2, layers.Dense):
                self.assertFalse(hasattr(l1, "flag"))
                self.assertTrue(hasattr(l2, "flag"))
def test_compiled_model_cloning(self):
model = models.Sequential()
model.add(layers.Input((3,)))
model.add(layers.Dense(5, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy")
cloned_model = clone_model(model)
self.assertEqual(model.compiled, cloned_model.compiled)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/cloning.py | keras/src/models/cloning.py | from keras.src import backend
from keras.src import tree
from keras.src import utils
from keras.src.api_export import keras_export
from keras.src.layers import Input
from keras.src.layers import InputLayer
from keras.src.models.functional import Functional
from keras.src.models.functional import functional_like_constructor
from keras.src.models.sequential import Sequential
from keras.src.saving import serialization_lib
@keras_export("keras.models.clone_model")
def clone_model(
    model,
    input_tensors=None,
    clone_function=None,
    call_function=None,
    recursive=False,
    **kwargs,
):
    """Clone a Functional or Sequential `Model` instance.

    Model cloning is similar to calling a model on new inputs,
    except that it creates new layers (and thus new weights) instead
    of sharing the weights of the existing layers.

    Note that
    `clone_model` will not preserve the uniqueness of shared objects within the
    model (e.g. a single variable attached to two distinct layers will be
    restored as two separate variables).

    Args:
        model: Instance of `Model`
            (could be a Functional model or a Sequential model).
        input_tensors: optional list of input tensors or InputLayer objects
            to build the model upon. If not provided,
            new `Input` objects will be created.
        clone_function: Callable with signature `fn(layer)`
            to be used to clone each layer in the target
            model (except `Input` instances). It takes as argument the
            layer instance to be cloned, and returns the corresponding layer
            instance to be used in the model copy. If unspecified, this callable
            defaults to the following serialization/deserialization function:
            `lambda layer: layer.__class__.from_config(layer.get_config())`.
            By passing a custom callable, you can customize your copy of the
            model, e.g. by wrapping certain layers of interest (you might want
            to replace all `LSTM` instances with equivalent
            `Bidirectional(LSTM(...))` instances, for example).
            Defaults to `None`.
        call_function: Callable with signature
            `fn(layer, *args, **kwargs)` to be used to call each
            cloned layer and a set of inputs. It takes the layer instance,
            the call arguments and keyword arguments, and returns the
            call outputs. If unspecified, this callable defaults to
            the regular `__call__()` method:
            `def fn(layer, *args, **kwargs): return layer(*args, **kwargs)`.
            By passing a custom callable, you can insert new layers before or
            after a given layer. Note: this argument can only be used with
            Functional models.
        recursive: Boolean. Whether to recursively clone any Sequential
            or Functional models encountered in the original
            Sequential/Functional model. If `False`,
            then inner models are cloned by calling `clone_function()`.
            If `True`, then inner models are cloned by calling `clone_model()`
            with the same `clone_function`, `call_function`, and `recursive`
            arguments. Note that in this case, `call_function`
            will not be propagated to any Sequential model
            (since it is not applicable to Sequential models).

    Returns:
        An instance of `Model` reproducing the behavior
        of the original model, on top of new inputs tensors,
        using newly instantiated weights. The cloned model may behave
        differently from the original model if a custom `clone_function`
        or `call_function` modifies a layer or layer call.

    Example:

    ```python
    # Create a test Sequential model.
    model = keras.Sequential([
        keras.layers.Input(shape=(728,)),
        keras.layers.Dense(32, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid'),
    ])
    # Create a copy of the test model (with freshly initialized weights).
    new_model = clone_model(model)
    ```

    Using a `clone_function` to make a model deterministic by setting the
    random seed everywhere:

    ```python
    def clone_function(layer):
        config = layer.get_config()
        if "seed" in config:
            config["seed"] = 1337
        return layer.__class__.from_config(config)

    new_model = clone_model(model, clone_function=clone_function)
    ```

    Using a `call_function` to add a `Dropout` layer after each `Dense` layer
    (without recreating new layers):

    ```python
    def call_function(layer, *args, **kwargs):
        out = layer(*args, **kwargs)
        if isinstance(layer, keras.layers.Dense):
            out = keras.layers.Dropout(0.5)(out)
        return out

    new_model = clone_model(
        model,
        clone_function=lambda x: x,  # Reuse the same layers.
        call_function=call_function,
    )
    ```

    Note that subclassed models cannot be cloned by default,
    since their internal layer structure is not known.
    To achieve equivalent functionality
    as `clone_model` in the case of a subclassed model, simply make sure
    that the model class implements `get_config()`
    (and optionally `from_config()`), and call:

    ```python
    new_model = model.__class__.from_config(model.get_config())
    ```

    In the case of a subclassed model, you cannot use a custom
    `clone_function`.
    """
    cache = kwargs.pop("cache", None)
    if kwargs:
        raise ValueError(
            f"Unexpected keyword argument(s): {tuple(kwargs.keys())}"
        )

    if isinstance(model, Sequential):
        if call_function is not None:
            # Checked before wrapping `clone_function` so the error message
            # reports the user-supplied value. (Bug fix: the message used to
            # interpolate `clone_function` -- by then the internal wrapper --
            # instead of `call_function`.)
            raise ValueError(
                "`call_function` argument is not supported with Sequential "
                "models. In a Sequential model, layers aren't called "
                "at model-construction time (they're merely listed). "
                "Use `call_function` with Functional models only. "
                "Received model of "
                f"type '{model.__class__.__name__}', with "
                f"call_function={call_function}"
            )
        # Wrap clone_function to handle recursiveness and layer sharing.
        clone_function = _wrap_clone_function(
            clone_function,
            call_function=call_function,
            recursive=recursive,
            cache=cache,
        )
        return _clone_sequential_model(
            model,
            clone_function=clone_function,
            input_tensors=input_tensors,
        )

    if isinstance(model, Functional):
        # Wrap clone_function to handle recursiveness and layer sharing.
        clone_function = _wrap_clone_function(
            clone_function,
            call_function=call_function,
            recursive=recursive,
            cache=cache,
        )
        # If the get_config() method is the same as a regular Functional
        # model, we're safe to use _clone_functional_model (which relies
        # on a Functional constructor). In the case where the get_config
        # is custom, this may not necessarily work, but if clone_function
        # or input_tensors are passed, we attempt it anyway
        # in order to preserve backwards compatibility.
        if utils.is_default(model.get_config) or (
            clone_function or input_tensors
        ):
            return _clone_functional_model(
                model,
                clone_function=clone_function,
                call_function=call_function,
                input_tensors=input_tensors,
            )

    # Case of a custom model class
    if clone_function or input_tensors:
        raise ValueError(
            "Arguments `clone_function` and `input_tensors` "
            "are only supported for Sequential models "
            "or Functional models. Received model of "
            f"type '{model.__class__.__name__}', with "
            f"clone_function={clone_function} and "
            f"input_tensors={input_tensors}"
        )
    if call_function is not None:
        # Bug fix: message previously interpolated `clone_function`.
        raise ValueError(
            "Argument `call_function` is only supported "
            "for Functional models. Received model of "
            f"type '{model.__class__.__name__}', with "
            f"call_function={call_function}"
        )
    # Fallback for subclassed models: round-trip through serialization.
    config = serialization_lib.serialize_keras_object(model)
    return serialization_lib.deserialize_keras_object(
        config, custom_objects={model.__class__.__name__: model.__class__}
    )
def _wrap_clone_function(
    clone_function, call_function=None, recursive=False, cache=None
):
    """Wrap `clone_function` to handle layer sharing and recursion.

    The returned callable memoizes clones by layer identity (so a layer
    shared at several points maps to a single clone) and, when `recursive`
    is true, dispatches nested Sequential/Functional models back through
    `clone_model` instead of `clone_function`.
    """
    if clone_function is None:

        def _default_clone(layer):
            return layer.__class__.from_config(layer.get_config())

        clone_function = _default_clone

    cache = {} if cache is None else cache

    def wrapped_clone_function(layer):
        key = id(layer)
        if key in cache:
            return cache[key]
        if recursive and isinstance(layer, Sequential):
            # Note: Sequential doesn't support call_function.
            result = clone_model(
                layer,
                clone_function=clone_function,
                cache=cache,
            )
        elif recursive and isinstance(layer, Functional):
            result = clone_model(
                layer,
                clone_function=clone_function,
                call_function=call_function,
                cache=cache,
            )
        else:
            result = clone_function(layer)
        cache[key] = result
        return result

    return wrapped_clone_function
def _clone_sequential_model(model, clone_function, input_tensors=None):
    """Clone a `Sequential` model instance.

    Model cloning is similar to calling a model on new inputs,
    except that it creates new layers (and thus new weights) instead
    of sharing the weights of the existing layers.

    Args:
        model: Instance of `Sequential`.
        input_tensors: optional list of input tensors
            to build the model upon. If not provided,
            placeholders will be created.
        clone_function: callable to be applied on non-input layers in the model.
            By default, it clones the layer (without copying the weights).

    Returns:
        An instance of `Sequential` reproducing the behavior
        of the original model, on top of new inputs tensors,
        using newly instantiated weights.

    Raises:
        ValueError: if `model` is not a `Sequential`, `clone_function` is
            not callable, or `input_tensors` is invalid.
    """
    if not isinstance(model, Sequential):
        raise ValueError(
            "Expected `model` argument "
            "to be a `Sequential` model instance. "
            f"Received: model={model}"
        )
    if not callable(clone_function):
        raise ValueError(
            "Expected `clone_function` argument to be a callable. "
            f"Received: clone_function={clone_function}"
        )
    # NOTE(review): `model.layers` appears to exclude the InputLayer, which
    # is handled separately through `model._layers[0]` below -- confirm.
    new_layers = [clone_function(layer) for layer in model.layers]
    if isinstance(model._layers[0], InputLayer):
        # Capture the original input metadata so the clone's Input matches.
        ref_input_layer = model._layers[0]
        input_name = ref_input_layer.name
        input_batch_shape = ref_input_layer.batch_shape
        input_dtype = ref_input_layer._dtype
        input_optional = ref_input_layer.optional
    else:
        input_name = None
        input_dtype = None
        input_batch_shape = None
        input_optional = False
    if input_tensors is not None:
        # A single tensor (possibly wrapped in a one-element list/tuple)
        # becomes the clone's input.
        if isinstance(input_tensors, (list, tuple)):
            if len(input_tensors) != 1:
                raise ValueError(
                    "Argument `input_tensors` must contain a single tensor."
                )
            input_tensors = input_tensors[0]
        if not isinstance(input_tensors, backend.KerasTensor):
            raise ValueError(
                "Argument `input_tensors` must be a KerasTensor. "
                f"Received invalid value: input_tensors={input_tensors}"
            )
        inputs = Input(
            tensor=input_tensors,
            name=input_name,
            optional=input_optional,
        )
        new_layers = [inputs] + new_layers
    else:
        # No explicit tensor given: recreate a placeholder Input mirroring
        # the original, if the original had a known batch shape.
        if input_batch_shape is not None:
            inputs = Input(
                batch_shape=input_batch_shape,
                dtype=input_dtype,
                name=input_name,
                optional=input_optional,
            )
            new_layers = [inputs] + new_layers
    cloned_model = Sequential(
        new_layers, name=model.name, trainable=model.trainable
    )
    # If model compiled already then set same to cloned model
    if model.compiled:
        compiled_config = model.get_compile_config()
        cloned_model.compile_from_config(compiled_config)
    return cloned_model
def _clone_functional_model(
    model, clone_function, input_tensors=None, call_function=None
):
    """Clone a `Functional` model instance.

    Model cloning is similar to calling a model on new inputs,
    except that it creates new layers (and thus new weights) instead
    of sharing the weights of the existing layers.

    Input layers are always cloned.

    Args:
        model: Instance of `Functional`.
        input_tensors: optional list of input tensors
            to build the model upon. If not provided,
            placeholders will be created.
        clone_function: callable to be applied on non-input layers in the model.
            By default, it clones the layer (without copying the weights).
        call_function: optional callable `fn(layer, *args, **kwargs)` used
            to call each cloned layer; forwarded to the graph re-run.

    Returns:
        An instance of `Functional` reproducing the behavior
        of the original model, on top of new inputs tensors,
        using newly instantiated weights.

    Raises:
        ValueError: on invalid arguments or an `input_tensors` structure
            that doesn't match `model.input`.
    """
    if not callable(clone_function):
        raise ValueError(
            "Expected `clone_function` argument to be a callable. "
            f"Received: clone_function={clone_function}"
        )
    if not isinstance(model, Functional):
        raise ValueError(
            "Expected `model` argument "
            f"to be a Functional Model instance. Received: model={model}"
        )
    if input_tensors is not None:
        # Validate type and structure of the user-supplied inputs.
        if not all(
            isinstance(x, backend.KerasTensor)
            for x in tree.flatten(input_tensors)
        ):
            raise ValueError(
                "All entries in `input_tensors` must be KerasTensors. "
                f"Received invalid values: inputs_tensors={input_tensors}"
            )
        try:
            tree.assert_same_structure(input_tensors, model.input)
        except ValueError as e:
            raise ValueError(
                "`input_tensors` must have the same structure as model.input"
                f"\nReference structure: {model.input}"
                f"\nReceived structure: {input_tensors}"
            ) from e
    else:
        # Build fresh Input placeholders mirroring the original inputs.
        input_tensors = tree.map_structure(
            lambda x: Input(batch_shape=x.shape, dtype=x.dtype, name=x.name),
            model.input,
        )

    def operation_fn(layer):
        # Map each original operation to its clone while replaying the graph.
        new_layer = clone_function(layer)
        return new_layer

    output_tensors = model._run_through_graph(
        input_tensors,
        operation_fn=operation_fn,
        call_fn=call_function,
    )

    if functional_like_constructor(model.__class__):
        new_model = model.__class__(
            input_tensors, output_tensors, name=model.name
        )
    else:
        # This may be incorrect: the new model will end up having a different
        # class than the original. However various existing models rely
        # on this behavior, so we keep it.
        new_model = Functional(input_tensors, output_tensors, name=model.name)
    if model.compiled:
        compiled_config = model.get_compile_config()
        new_model.compile_from_config(compiled_config)
    return new_model
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/functional.py | keras/src/models/functional.py | import copy
import inspect
import typing
import warnings
from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.backend.common import global_state
from keras.src.layers.core.input_layer import Input
from keras.src.layers.core.input_layer import InputLayer
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.legacy.saving import saving_utils
from keras.src.legacy.saving import serialization as legacy_serialization
from keras.src.models.model import Model
from keras.src.ops.function import Function
from keras.src.ops.function import _build_map
from keras.src.ops.function import make_node_key
from keras.src.ops.node import KerasHistory
from keras.src.ops.node import Node
from keras.src.ops.operation import Operation
from keras.src.saving import serialization_lib
from keras.src.utils import tracking
class Functional(Function, Model):
"""A `Functional` model is a `Model` defined as a directed graph of layers.
Three types of `Model` exist: subclassed `Model`, `Functional` model,
and `Sequential` (a special case of `Functional`).
A `Functional` model can be instantiated by passing two arguments to
`__init__()`. The first argument is the `keras.Input` objects
that represent the inputs to the model.
The second argument specifies the output tensors that represent
the outputs of this model. Both arguments can be a nested structure
of tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,), name='x1'),
'x2': keras.Input(shape=(1,), name='x2')}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2']])
model = keras.Model(inputs, outputs)
```
A `Functional` model constructed using the Functional API can also
include raw Keras 3 ops.
Example:
```python
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = ops.nn.relu(x)
model = keras.Model(inputs, outputs)
```
A new `Functional` model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=32, height=32)(inputs)
conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)
full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras.Input` objects, but with the tensors
that are originated from `keras.Input` objects.
Under the hood, the layers and weights will
be shared across these models, so that user can train the `full_model`, and
use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
well, and the created models are standard `Functional` model that support
all the existing API.
Args:
inputs: List of input tensors (must be created via `keras.Input()`
or originated from `keras.Input()`).
outputs: List of output tensors.
name: String, optional. Name of the model.
trainable: Boolean, optional. If the model's variables should be
trainable.
"""
    def __new__(cls, *args, **kwargs):
        # `cast` helps static type checkers infer the instance type as `cls`.
        return typing.cast(cls, super().__new__(cls))
    @tracking.no_automatic_dependency_tracking
    def __init__(self, inputs, outputs, name=None, **kwargs):
        """Build a Functional model from `inputs`/`outputs` KerasTensors.

        Args:
            inputs: Nested structure of KerasTensors (inputs of the graph).
            outputs: Nested structure of KerasTensors (outputs of the graph).
            name: Optional model name.
            **kwargs: May include `trainable`.
        """
        # Warn when dict keys don't match the tensor names.
        if isinstance(inputs, dict):
            for k, v in inputs.items():
                if isinstance(v, backend.KerasTensor) and k != v.name:
                    warnings.warn(
                        "When providing `inputs` as a dict, all keys in the "
                        "dict must match the names of the corresponding "
                        f"tensors. Received key '{k}' mapping to value {v} "
                        f"which has name '{v.name}'. Change the tensor name to "
                        f"'{k}' (via `Input(..., name='{k}')`)"
                    )
        trainable = kwargs.pop("trainable", None)
        # Validate that every input/output leaf is a KerasTensor.
        flat_inputs = tree.flatten(inputs)
        flat_outputs = tree.flatten(outputs)
        for x in flat_inputs:
            if not isinstance(x, backend.KerasTensor):
                raise ValueError(
                    "All `inputs` values must be KerasTensors. Received: "
                    f"inputs={inputs} including invalid value {x} of "
                    f"type {type(x)}"
                )
        for x in flat_outputs:
            if not isinstance(x, backend.KerasTensor):
                raise ValueError(
                    "All `outputs` values must be KerasTensors. Received: "
                    f"outputs={outputs} including invalid value {x} of "
                    f"type {type(x)}"
                )
        # Inputs that are intermediate tensors (not Input nodes): clone the
        # subgraph so the model gets real Input nodes.
        if not all(is_input_keras_tensor(t) for t in flat_inputs):
            inputs, outputs = clone_graph_nodes(inputs, outputs)
        Function.__init__(self, inputs, outputs, name=name)
        if trainable is not None:
            self.trainable = trainable
        self._layers = self.layers
        self.build(None)
        # We will convert directly (to the correct dtype per input).
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True
        output_layers = [x._keras_history[0] for x in self.outputs]
        self.output_names = [x.name for x in output_layers]
    def _lock_state(self):
        """No-op: Functional model state stays mutable after build."""
        # Unlike other layers, we allow Functional state to be mutable after
        # build. E.g. to attach a layer to a model that is not part of the
        # functional DAG.
        pass
    def _obj_type(self):
        # Human-readable type label for this model.
        return "Functional"
@property
def layers(self):
layers = []
for operation in self._operations:
if isinstance(operation, Layer):
layers.append(operation)
return layers
    @layers.setter
    def layers(self, _):
        # `layers` is derived from `_operations`; direct assignment is an
        # error.
        raise AttributeError(
            "`Model.layers` attribute is reserved and should not be used. "
            "Please use another name."
        )
    def call(self, inputs, training=None, mask=None, **kwargs):
        """Run the functional graph on `inputs`.

        Args:
            inputs: Tensor(s) matching the model's input structure.
            training: Optional training flag propagated to layers.
            mask: Optional mask (or nested structure of masks) aligned with
                the flattened inputs.
            **kwargs: Additional call-context args propagated to layers.

        Returns:
            The graph outputs (a single tensor is unwrapped from its list).
        """
        # Add support for training, masking
        inputs = self._standardize_inputs(inputs)
        if mask is None:
            masks = [None] * len(inputs)
        else:
            masks = tree.flatten(mask)
        # Attach masks to the corresponding input tensors.
        for x, mask in zip(inputs, masks):
            if mask is not None:
                backend.set_keras_mask(x, mask)
        outputs = self._run_through_graph(
            inputs,
            operation_fn=lambda op: operation_fn(
                op, training=training, **kwargs
            ),
        )
        return unpack_singleton(outputs)
    def compute_output_spec(self, inputs, training=None, mask=None):
        """Delegate spec inference to `Function` (training/mask unused)."""
        # From Function
        return super().compute_output_spec(inputs)
    def compute_output_shape(self, input_shape):
        """Delegate shape inference to `Function`."""
        # From Function
        return super().compute_output_shape(input_shape)
    def build(self, input_shape):
        # The graph is already constructed in __init__; just mark as built.
        self.built = True
@property
def input_shape(self):
input_shapes = tree.map_structure(lambda x: x.shape, self.inputs)
if isinstance(input_shapes, list) and len(input_shapes) == 1:
return input_shapes[0]
return input_shapes
@property
def output_shape(self):
output_shapes = tree.map_structure(lambda x: x.shape, self.outputs)
if isinstance(output_shapes, list) and len(output_shapes) == 1:
return output_shapes[0]
return output_shapes
    def _assert_input_compatibility(self, *args):
        # Bypass `Model`'s version; resolve from the class after `Model`
        # in the MRO.
        return super(Model, self)._assert_input_compatibility(*args)
def _maybe_warn_inputs_struct_mismatch(self, inputs, raise_exception=False):
try:
# We first normalize to tuples before performing the check to
# suppress warnings when encountering mismatched tuples and lists.
tree.assert_same_structure(
tree.lists_to_tuples(inputs),
tree.lists_to_tuples(self._inputs_struct),
)
except:
model_inputs_struct = tree.map_structure(
lambda x: x.name, self._inputs_struct
)
inputs_struct = tree.map_structure(
lambda x: f"Tensor(shape={x.shape})", inputs
)
msg = (
"The structure of `inputs` doesn't match the expected "
f"structure.\nExpected: {model_inputs_struct}\n"
f"Received: inputs={inputs_struct}"
)
if raise_exception:
raise ValueError(msg)
warnings.warn(msg)
def _convert_inputs_to_tensors(self, flat_inputs):
converted = []
for x, input in zip(flat_inputs, self._inputs):
if x is None: # TODO: check if optional
converted.append(x)
else:
converted.append(
ops.convert_to_tensor(
x, dtype=input.dtype, sparse=input.sparse
)
)
return converted
    def _adjust_input_rank(self, flat_inputs):
        """Reconcile each input's rank with its reference Input spec.

        A trailing size-1 axis may be squeezed (input rank one higher than
        expected) or added (one lower, when the reference ends in 1); any
        other rank mismatch raises a `ValueError`.
        """
        adjusted = []
        for i, x in enumerate(flat_inputs):
            ref_shape = self._inputs[i].shape
            if x is None:
                adjusted.append(x)
                continue
            x_rank = len(x.shape)
            ref_rank = len(ref_shape)
            if x_rank == ref_rank:
                adjusted.append(x)
                continue
            if x_rank == ref_rank + 1:
                if x.shape[-1] == 1:
                    adjusted.append(ops.squeeze(x, axis=-1))
                    continue
            if x_rank == ref_rank - 1:
                if ref_shape[-1] == 1:
                    adjusted.append(ops.expand_dims(x, axis=-1))
                    continue
            # No compatible adjustment: report the offending input by path.
            flat_paths_and_inputs = tree.flatten_with_path(self._inputs_struct)
            path = ".".join(str(p) for p in flat_paths_and_inputs[i][0])
            raise ValueError(
                f"Invalid input shape for input {x} with name "
                f"'{self._inputs[i].name}' and path '{path}'. Expected shape "
                f"{ref_shape}, but input has incompatible shape {x.shape}"
            )
        # Add back metadata.
        for i in range(len(flat_inputs)):
            if hasattr(flat_inputs[i], "_keras_history"):
                adjusted[i]._keras_history = flat_inputs[i]._keras_history
            mask = backend.get_keras_mask(flat_inputs[i])
            if mask is not None:
                backend.set_keras_mask(adjusted[i], mask)
        return adjusted
    def _standardize_inputs(self, inputs):
        """Normalize `inputs` into the flat tensor list the graph expects.

        Reconciles dict inputs against non-dict input structures by name,
        warns/raises on mismatches, then converts values to tensors and
        adjusts their ranks.
        """
        raise_exception = False
        if (
            isinstance(self._inputs_struct, list)
            and len(self._inputs_struct) == 1
            and ops.is_tensor(inputs)
        ):
            # Single-input model called with a bare tensor: wrap it.
            inputs = [inputs]
        elif isinstance(inputs, dict) and not isinstance(
            self._inputs_struct, dict
        ):
            # This is to avoid warning
            # when we have reconcilable dict/list structs
            if hasattr(self._inputs_struct, "__len__") and all(
                isinstance(i, backend.KerasTensor) for i in self._inputs_struct
            ):
                expected_keys = set(i.name for i in self._inputs_struct)
                keys = set(inputs.keys())
                if expected_keys.issubset(keys):
                    # Reorder dict values by the model's input names.
                    inputs = [inputs[i.name] for i in self._inputs_struct]
                else:
                    raise_exception = True
            elif isinstance(self._inputs_struct, backend.KerasTensor):
                if self._inputs_struct.name in inputs:
                    inputs = [inputs[self._inputs_struct.name]]
                else:
                    raise_exception = True
            else:
                raise_exception = True
        if (
            isinstance(self._inputs_struct, dict)
            and not isinstance(inputs, dict)
            and list(self._inputs_struct.keys())
            != sorted(self._inputs_struct.keys())
        ):
            # NOTE(review): dict inputs with non-sorted keys and a non-dict
            # argument are treated as a mismatch -- presumably because the
            # flatten order would be ambiguous; confirm.
            raise_exception = True
        self._maybe_warn_inputs_struct_mismatch(
            inputs, raise_exception=raise_exception
        )
        flat_inputs = tree.flatten(inputs)
        flat_inputs = self._convert_inputs_to_tensors(flat_inputs)
        return self._adjust_input_rank(flat_inputs)
    @property
    def input(self):
        # For backwards compatibility,
        # override `input` to retrieve the user-provided
        # constructor inputs
        return self._inputs_struct
    @property
    def output(self):
        # The outputs exactly as structured at construction time.
        return self._outputs_struct
    def add_loss(self, loss):
        """Not implemented for Functional models."""
        # Symbolic only. TODO
        raise NotImplementedError
    @property
    def input_spec(self):
        """Input spec(s) derived from the model's input tensors.

        A manually-set spec takes precedence. Otherwise `InputSpec`s are
        built from the inputs, or `None` is returned for deeply nested
        dict inputs (checks skipped).
        """
        if hasattr(self, "_manual_input_spec"):
            return self._manual_input_spec

        def shape_with_no_batch_size(x):
            # Drop the batch dimension: specs only constrain feature axes.
            x = list(x)
            if x:
                x[0] = None
            return tuple(x)

        def make_spec_for_tensor(x, name=None):
            optional = False
            if isinstance(x._keras_history[0], InputLayer):
                if x._keras_history[0].optional:
                    optional = True
            return InputSpec(
                shape=shape_with_no_batch_size(x.shape),
                allow_last_axis_squeeze=True,
                name=x._keras_history[0].name if name is None else name,
                optional=optional,
            )

        if isinstance(self._inputs_struct, dict):
            if all(
                isinstance(x, backend.KerasTensor)
                for x in self._inputs_struct.values()
            ):
                # Case where `_nested_inputs` is a plain dict of Inputs.
                names = sorted(self._inputs_struct.keys())
                return [
                    make_spec_for_tensor(self._inputs_struct[name], name=name)
                    for name in names
                ]
            return None  # Deeply nested dict: skip checks.
        return [make_spec_for_tensor(x) for x in self.inputs]
    @input_spec.setter
    def input_spec(self, value):
        # A manually-set spec takes precedence in the `input_spec` getter.
        self._manual_input_spec = value
    def get_config(self):
        """Return the serializable config of this Functional model.

        Falls back to `Model.get_config` for subclassed networks whose
        constructor is not Functional-like.
        """
        if not functional_like_constructor(self.__class__):
            # Subclassed networks are not serializable
            # (unless serialization is implemented by
            # the author of the subclassed network).
            return Model.get_config(self)
        config = {
            "name": self.name,
            "trainable": self.trainable,
        }
        # Build a map from a layer unique name (make_node_key)
        # to the index of the nodes that are saved in the config.
        # Only nodes in network_nodes are saved.
        node_reindexing_map = {}
        for operation in self.operations:
            if issubclass(operation.__class__, Functional):
                # Functional models start with a pre-existing node
                # linking their input to output.
                kept_nodes = 1
            else:
                kept_nodes = 0
            for original_node_index, node in enumerate(
                operation._inbound_nodes
            ):
                node_key = make_node_key(operation, original_node_index)
                if node_key in self._nodes:
                    # i.e. we mark it to be saved
                    node_reindexing_map[node_key] = kept_nodes
                    kept_nodes += 1
        # serialize and save the layers in layer_configs
        layer_configs = []
        for operation in self.operations:  # From the earliest layers on.
            filtered_inbound_nodes = []
            for original_node_index, node in enumerate(
                operation._inbound_nodes
            ):
                node_key = make_node_key(operation, original_node_index)
                if node_key in self._nodes:
                    # The node is relevant to the model:
                    # add to filtered_inbound_nodes.
                    node_data = serialize_node(node, own_nodes=self._nodes)
                    if node_data is not None:
                        filtered_inbound_nodes.append(node_data)
            serialize_obj_fn = serialization_lib.serialize_keras_object
            if global_state.get_global_attribute("use_legacy_config", False):
                # Legacy format serialization used for H5 and SavedModel
                serialize_obj_fn = legacy_serialization.serialize_keras_object
            layer_config = serialize_obj_fn(operation)
            layer_config["name"] = operation.name
            layer_config["inbound_nodes"] = filtered_inbound_nodes
            layer_configs.append(layer_config)
        config["layers"] = layer_configs
        # Gather info about inputs and outputs.

        def get_tensor_config(tensor):
            # Map a tensor to [operation_name, new_node_index, tensor_index].
            operation = tensor._keras_history[0]
            node_index = tensor._keras_history[1]
            tensor_index = tensor._keras_history[2]
            node_key = make_node_key(operation, node_index)
            assert node_key in self._nodes
            new_node_index = node_reindexing_map[node_key]
            return [operation.name, new_node_index, tensor_index]

        def map_tensors(tensors):
            return tree.map_structure(get_tensor_config, tensors)

        config["input_layers"] = map_tensors(self._inputs_struct)
        config["output_layers"] = map_tensors(self._outputs_struct)
        # Deep copy -- presumably so callers can't mutate internal state
        # through the returned config; confirm.
        return copy.deepcopy(config)
def functional_from_config(cls, config, custom_objects=None):
    """Instantiates a Functional model from its config (from `get_config()`).

    Args:
        cls: Class of the model, e.g. a custom subclass of `Model`.
        config: Output of `get_config()` for the original model instance.
        custom_objects: Optional dict of custom objects.

    Returns:
        An instance of `cls`.
    """
    # Layer instances created during
    # the graph reconstruction process
    created_layers = {}
    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = {}

    def add_unprocessed_node(layer, node_data):
        """Enqueue node data for a layer.

        Args:
            layer: layer object
            node_data: Node data specifying layer call
        """
        if layer not in unprocessed_nodes:
            unprocessed_nodes[layer] = [node_data]
        else:
            unprocessed_nodes[layer].append(node_data)

    def process_node(layer, node_data):
        """Reconstruct node by linking to inbound layers.

        Args:
            layer: Layer to process
            node_data: List of layer configs
        """
        args, kwargs = deserialize_node(node_data, created_layers)
        # Call layer on its inputs, thus creating the node
        # and building the layer if needed.
        layer(*args, **kwargs)

    def process_layer(layer_data):
        """Deserializes a layer and indexes its inbound nodes.

        Args:
            layer_data: layer config dict.
        """
        layer_name = layer_data["name"]
        # Instantiate layer.
        if "module" not in layer_data:
            # Legacy format deserialization (no "module" key)
            # used for H5 and SavedModel formats
            layer = saving_utils.model_from_config(
                layer_data, custom_objects=custom_objects
            )
        else:
            layer = serialization_lib.deserialize_keras_object(
                layer_data, custom_objects=custom_objects
            )
        if not isinstance(layer, Operation):
            raise ValueError(
                "Unexpected object from deserialization, expected a layer or "
                f"operation, got a {type(layer)}"
            )
        created_layers[layer_name] = layer
        # Gather layer inputs.
        inbound_nodes_data = layer_data["inbound_nodes"]
        for node_data in inbound_nodes_data:
            # We don't process nodes (i.e. make layer calls)
            # on the fly because the inbound node may not yet exist,
            # in case of layer shared at different topological depths
            # (e.g. a model such as A(B(A(B(x)))))
            add_unprocessed_node(layer, node_data)

    # Extract config used to instantiate Functional model from the config. The
    # remaining config will be passed as keyword arguments to the Model
    # constructor.
    functional_config = {}
    for key in ["layers", "input_layers", "output_layers"]:
        functional_config[key] = config.pop(key)
    for key in ["name", "trainable"]:
        if key in config:
            functional_config[key] = config.pop(key)
        else:
            functional_config[key] = None

    # First, we create all layers and enqueue nodes to be processed
    for layer_data in functional_config["layers"]:
        process_layer(layer_data)

    # Then we process nodes in order of layer depth.
    # Nodes that cannot yet be processed (if the inbound node
    # does not yet exist) are re-enqueued, and the process
    # is repeated until all nodes are processed.
    while unprocessed_nodes:
        for layer_data in functional_config["layers"]:
            layer = created_layers[layer_data["name"]]
            # Process all nodes in layer, if not yet processed
            if layer in unprocessed_nodes:
                node_data_list = unprocessed_nodes[layer]
                # Process nodes in order
                node_index = 0
                while node_index < len(node_data_list):
                    node_data = node_data_list[node_index]
                    try:
                        process_node(layer, node_data)
                    # If the node does not have all inbound layers
                    # available, stop processing and continue later
                    except IndexError:
                        break
                    node_index += 1
                # If not all nodes processed then store unprocessed nodes
                if node_index < len(node_data_list):
                    unprocessed_nodes[layer] = node_data_list[node_index:]
                # If all nodes processed remove the layer
                else:
                    del unprocessed_nodes[layer]

    # Create list of input and output tensors and return new class
    name = functional_config["name"]
    trainable = functional_config["trainable"]

    def get_tensor(layer_name, node_index, tensor_index):
        # Resolve a serialized [name, node_index, tensor_index] reference.
        assert layer_name in created_layers
        layer = created_layers[layer_name]
        if isinstance(layer, Functional):
            # Functional models start out with a built-in node.
            node_index -= 1
        layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
        return layer_output_tensors[tensor_index]

    def map_tensors(tensors):
        if (
            isinstance(tensors, list)
            and len(tensors) == 3
            and isinstance(tensors[0], str)
        ):
            # Leaf
            return get_tensor(*tensors)
        if isinstance(tensors, dict):
            return {k: map_tensors(v) for k, v in tensors.items()}
        if isinstance(tensors, tuple):
            return tuple([map_tensors(v) for v in tensors])
        return [map_tensors(v) for v in tensors]

    input_tensors = map_tensors(functional_config["input_layers"])
    output_tensors = map_tensors(functional_config["output_layers"])
    return cls(
        inputs=input_tensors,
        outputs=output_tensors,
        name=name,
        trainable=trainable,
        **config,
    )
def operation_fn(operation, **call_context_args):
"""Wraps each op to inject the call-context args."""
def call(*args, **kwargs):
# Propagate all registered call-context args
for name, value in call_context_args.items():
if (
name in getattr(operation, "_call_context_args", {})
and value is not None
):
kwargs[name] = value
return operation(*args, **kwargs)
return call
def functional_like_constructor(cls):
init_args = inspect.getfullargspec(cls.__init__).args[1:]
functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:]
if init_args == functional_init_args:
return True
return False
def unpack_singleton(x):
if isinstance(x, (list, tuple)) and len(x) == 1:
return x[0]
return x
def serialize_node(node, own_nodes=()):
if not node.input_tensors:
# Does not need to be serialized.
return
def serialize_keras_tensor(x):
# Serialize KerasTensor while converting
# node indices to only include nodes relevant to `own_nodes`.
if isinstance(x, backend.KerasTensor):
operation, node_index, tensor_index = x._keras_history
irrelevant_node_count = 0
for i, node in enumerate(operation._inbound_nodes[:node_index]):
node_key = make_node_key(operation, i)
if node_key not in own_nodes:
irrelevant_node_count += 1
x._keras_history = KerasHistory(
operation, node_index - irrelevant_node_count, tensor_index
)
serialized = serialization_lib.serialize_keras_object(x)
x._keras_history = KerasHistory(operation, node_index, tensor_index)
return serialized
return x
args = node.arguments.args
kwargs = node.arguments.kwargs
args = tree.map_structure(serialize_keras_tensor, args)
kwargs = tree.map_structure(serialize_keras_tensor, kwargs)
return {
"args": serialization_lib.serialize_keras_object(args),
"kwargs": serialization_lib.serialize_keras_object(kwargs),
}
def deserialize_node(node_data, created_layers):
"""Return (args, kwargs) for calling the node layer."""
if not node_data:
return [], {}
if isinstance(node_data, list):
# Legacy case.
input_tensors = []
for input_data in node_data:
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
else:
raise ValueError(
"Cannot deserialize the model (invalid config data?)"
)
inbound_layer = created_layers[inbound_layer_name]
# Raise an error if the corresponding layer node
# has not yet been created
if len(inbound_layer._inbound_nodes) <= inbound_node_index:
raise IndexError(
"Layer node index out of bounds.\n"
f"inbound_layer = {inbound_layer}\n"
"inbound_layer._inbound_nodes = "
f"{inbound_layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
inbound_node.output_tensors[inbound_tensor_index]
)
return [unpack_singleton(input_tensors)], kwargs
args = serialization_lib.deserialize_keras_object(node_data["args"])
kwargs = serialization_lib.deserialize_keras_object(node_data["kwargs"])
def convert_revived_tensor(x):
if isinstance(x, backend.KerasTensor):
history = x._pre_serialization_keras_history
if history is None:
return x
layer = created_layers.get(history[0], None)
if layer is None:
raise ValueError(f"Unknown layer: {history[0]}")
inbound_node_index = history[1]
inbound_tensor_index = history[2]
if len(layer._inbound_nodes) <= inbound_node_index:
raise IndexError(
"Layer node index out of bounds.\n"
f"inbound_layer = {layer}\n"
f"inbound_layer._inbound_nodes = {layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = layer._inbound_nodes[inbound_node_index]
return inbound_node.output_tensors[inbound_tensor_index]
return x
args = tree.map_structure(convert_revived_tensor, args)
kwargs = tree.map_structure(convert_revived_tensor, kwargs)
return args, kwargs
def is_input_keras_tensor(x):
(
operation,
node_index,
_,
) = x._keras_history
node = operation._inbound_nodes[node_index]
return node.is_input
def clone_single_keras_tensor(x):
return backend.KerasTensor(
shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=f"{x.name}_clone"
)
def clone_keras_tensors(tensors, kt_id_mapping):
def swap(x):
if not isinstance(x, backend.KerasTensor):
return x
if id(x) in kt_id_mapping:
return kt_id_mapping[id(x)]
new_x = clone_single_keras_tensor(x)
kt_id_mapping[id(x)] = new_x
return new_x
return tree.map_structure(swap, tensors)
def find_nodes_by_inputs_and_outputs(inputs, outputs):
nodes, _ = _build_map(inputs, outputs)
return nodes
def clone_graph_nodes(inputs, outputs):
"""Clone the `Node` between the inputs and output tensors.
This function is used to create a new functional model from any intermediate
Keras tensors. The clone of the nodes mimic the behavior of reconstructing
the functional graph network by re-executing all the `__call__()` methods.
The cloned nodes will be appended to the layers.
Note that a new `keras.Input` will be created for any items in the
`inputs`
Args:
inputs: A nested structure of `KerasTensor` instances.
outputs: A nested structure of `KerasTensor` instances.
Returns:
A pair of inputs and outputs, with cloned `KerasTensor` instances.
They can be used to create a new functional model.
"""
nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)
cloned_inputs = []
cloned_outputs = []
# We not only need to create copies of Nodes (mimic the calls), also need to
# clone Keras tensors to avoid the override of _keras_history attached on
# the Keras tensor. The following dict is used to track any keras tensor we
# cloned The key is the string ID of the original keras tensor, and value is
# the cloned Keras tensor instance.
kt_id_mapping = {}
op_id_mapping = {}
for kt_input in tree.flatten(inputs):
if is_input_keras_tensor(kt_input):
# For any existing Keras tensor from keras.Input, leave them as is.
cloned_inputs.append(kt_input)
kt_id_mapping[id(kt_input)] = kt_input
else:
# We need to create a new Keras tensor for any intermediate tensor
original_op = kt_input._keras_history.operation
optional = False
if isinstance(original_op, InputLayer):
optional = original_op.optional
cloned_input = Input(
batch_shape=kt_input.shape,
dtype=kt_input.dtype,
sparse=kt_input.sparse,
name=f"{kt_input.name}CLONE",
optional=optional,
)
cloned_inputs.append(cloned_input)
kt_id_mapping[id(kt_input)] = cloned_input
op_id_mapping[id(kt_input._keras_history[0])] = (
cloned_input._keras_history[0]
)
cloned_inputs = tree.pack_sequence_as(inputs, cloned_inputs)
for kt_output in tree.flatten(outputs):
cpy = clone_single_keras_tensor(kt_output)
# We reuse the _keras_history here, which contains the old information.
cpy._keras_history = kt_output._keras_history
cloned_outputs.append(cpy)
kt_id_mapping[id(kt_output)] = cpy
cloned_outputs = tree.pack_sequence_as(outputs, cloned_outputs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/model.py | keras/src/models/model.py | import inspect
import json
import typing
import warnings
from collections.abc import Callable
from keras.src import backend
from keras.src import utils
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.models.variable_mapping import map_saveable_variables
from keras.src.quantizers.gptq_core import gptq_quantize
from keras.src.quantizers.utils import should_quantize_layer
from keras.src.saving import saving_api
from keras.src.trainers import trainer as base_trainer
from keras.src.utils import summary_utils
from keras.src.utils import traceback_utils
if backend.backend() == "tensorflow":
from keras.src.backend.tensorflow.trainer import (
TensorFlowTrainer as Trainer,
)
elif backend.backend() == "jax":
from keras.src.backend.jax.trainer import JAXTrainer as Trainer
elif backend.backend() == "torch":
from keras.src.backend.torch.trainer import TorchTrainer as Trainer
elif backend.backend() == "numpy":
from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer
elif backend.backend() == "openvino":
from keras.src.backend.openvino.trainer import OpenVINOTrainer as Trainer
else:
raise RuntimeError(
f"Backend '{backend.backend()}' must implement the Trainer class."
)
@keras_export(["keras.Model", "keras.models.Model"])
class Model(Trainer, base_trainer.Trainer, Layer):
"""A model grouping layers into an object with training/inference features.
There are three ways to instantiate a `Model`:
## With the "Functional API"
You start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally, you create your model from inputs and outputs:
```python
inputs = keras.Input(shape=(37,))
x = keras.layers.Dense(32, activation="relu")(inputs)
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Note: Only dicts, lists, and tuples of input tensors are supported. Nested
inputs are not supported (e.g. lists of list or dicts of dict).
A new Functional API model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=128, height=128)(inputs)
conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)
full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras.Input` objects, but with the tensors that originate
from `keras.Input` objects. Under the hood, the layers and weights will
be shared across these models, so that user can train the `full_model`, and
use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
well, and the created models are standard Functional API models that support
all the existing APIs.
## By subclassing the `Model` class
In that case, you should define your
layers in `__init__()` and you should implement the model's forward pass
in `call()`.
```python
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(5, activation="softmax")
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call()`, which you can use to specify
a different behavior in training and inference:
```python
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(5, activation="softmax")
self.dropout = keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
Once the model is created, you can config the model with losses and metrics
with `model.compile()`, train the model with `model.fit()`, or use the model
to do prediction with `model.predict()`.
## With the `Sequential` class
In addition, `keras.Sequential` is a special case of model where
the model is purely a stack of single-input, single-output layers.
```python
model = keras.Sequential([
keras.Input(shape=(None, None, 3)),
keras.layers.Conv2D(filters=32, kernel_size=3),
])
```
"""
def __new__(cls, *args, **kwargs):
# Signature detection for usage of `Model` as a `Functional`
if functional_init_arguments(args, kwargs) and cls == Model:
from keras.src.models.functional import Functional
return Functional.__new__(Functional, *args, **kwargs)
return typing.cast(cls, super().__new__(cls))
def __init__(self, *args, **kwargs):
Trainer.__init__(self)
from keras.src.models import functional
# Signature detection for usage of a `Model` subclass
# as a `Functional` subclass
if functional_init_arguments(args, kwargs):
inject_functional_model_class(self.__class__)
functional.Functional.__init__(self, *args, **kwargs)
else:
Layer.__init__(self, *args, **kwargs)
def call(self, *args, **kwargs):
raise NotImplementedError(
f"Model {self.__class__.__name__} does not have a `call()` "
"method implemented."
)
@property
def layers(self):
return list(self._flatten_layers(include_self=False, recursive=False))
@layers.setter
def layers(self, _):
raise AttributeError(
"`Model.layers` attribute is reserved and should not be used. "
"Please use another name."
)
@traceback_utils.filter_traceback
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Args:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
"""
if index is not None and name is not None:
raise ValueError(
"Provide only a layer name or a layer index. Received: "
f"index={index}, name={name}."
)
if index is not None:
if len(self.layers) <= index:
raise ValueError(
f"Was asked to retrieve layer at index {index}"
f" but model only has {len(self.layers)}"
" layers."
)
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError(
f"No such layer: {name}. Existing layers are: "
f"{list(layer.name for layer in self.layers)}."
)
raise ValueError(
"Provide either a layer name or layer index at `get_layer`."
)
@traceback_utils.filter_traceback
def summary(
self,
line_length=None,
positions=None,
print_fn=None,
expand_nested=False,
show_trainable=False,
layer_range=None,
):
"""Prints a string summary of the network.
Args:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided, becomes
`[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
print_fn: Print function to use. By default, prints to `stdout`.
If `stdout` doesn't work in your environment, change to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
expand_nested: Whether to expand the nested models.
Defaults to `False`.
show_trainable: Whether to show if a layer is trainable.
Defaults to `False`.
layer_range: a list or tuple of 2 strings,
which is the starting layer name and ending layer name
(both inclusive) indicating the range of layers to be printed
in summary. It also accepts regex patterns instead of exact
names. In this case, the start predicate will be
the first element that matches `layer_range[0]`
and the end predicate will be the last element
that matches `layer_range[1]`.
By default `None` considers all layers of the model.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
summary_utils.print_summary(
self,
line_length=line_length,
positions=positions,
print_fn=print_fn,
expand_nested=expand_nested,
show_trainable=show_trainable,
layer_range=layer_range,
)
@traceback_utils.filter_traceback
def save(self, filepath, overwrite=True, zipped=None, **kwargs):
"""Saves a model as a `.keras` file.
Note that `model.save()` is an alias for `keras.saving.save_model()`.
The saved `.keras` file contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state.
Args:
filepath: `str` or `pathlib.Path` object.
The path where to save the model. Must end in `.keras`
(unless saving the model as an unzipped directory
via `zipped=False`).
overwrite: Whether we should overwrite any existing model at
the target location, or instead ask the user via
an interactive prompt.
zipped: Whether to save the model as a zipped `.keras`
archive (default when saving locally), or as an
unzipped directory (default when saving on the
Hugging Face Hub).
Example:
```python
model = keras.Sequential(
[
keras.layers.Dense(5, input_shape=(3,)),
keras.layers.Softmax(),
],
)
model.save("model.keras")
loaded_model = keras.saving.load_model("model.keras")
x = keras.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
"""
return saving_api.save_model(
self, filepath, overwrite=overwrite, zipped=zipped, **kwargs
)
@traceback_utils.filter_traceback
def save_weights(self, filepath, overwrite=True, max_shard_size=None):
"""Saves all weights to a single file or sharded files.
By default, the weights will be saved in a single `.weights.h5` file.
If sharding is enabled (`max_shard_size` is not `None`), the weights
will be saved in multiple files, each with a size at most
`max_shard_size` (in GB). Additionally, a configuration file
`.weights.json` will contain the metadata for the sharded files.
The saved sharded files contain:
- `*.weights.json`: The configuration file containing 'metadata' and
'weight_map'.
- `*_xxxxxx.weights.h5`: The sharded files containing only the
weights.
Args:
filepath: `str` or `pathlib.Path` object. Path where the weights
will be saved. When sharding, the filepath must end in
`.weights.json`. If `.weights.h5` is provided, it will be
overridden.
overwrite: Whether to overwrite any existing weights at the target
location or instead ask the user via an interactive prompt.
max_shard_size: `int` or `float`. Maximum size in GB for each
sharded file. If `None`, no sharding will be done. Defaults to
`None`.
Example:
```python
# Instantiate a EfficientNetV2L model with about 454MB of weights.
model = keras.applications.EfficientNetV2L(weights=None)
# Save the weights in a single file.
model.save_weights("model.weights.h5")
# Save the weights in sharded files. Use `max_shard_size=0.25` means
# each sharded file will be at most ~250MB.
model.save_weights("model.weights.json", max_shard_size=0.25)
# Load the weights in a new model with the same architecture.
loaded_model = keras.applications.EfficientNetV2L(weights=None)
loaded_model.load_weights("model.weights.h5")
x = keras.random.uniform((1, 480, 480, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
# Load the sharded weights in a new model with the same architecture.
loaded_model = keras.applications.EfficientNetV2L(weights=None)
loaded_model.load_weights("model.weights.json")
x = keras.random.uniform((1, 480, 480, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
"""
return saving_api.save_weights(
self, filepath, overwrite=overwrite, max_shard_size=max_shard_size
)
@traceback_utils.filter_traceback
def load_weights(self, filepath, skip_mismatch=False, **kwargs):
"""Load the weights from a single file or sharded files.
Weights are loaded based on the network's topology. This means the
architecture should be the same as when the weights were saved. Note
that layers that don't have weights are not taken into account in the
topological ordering, so adding or removing layers is fine as long as
they don't have weights.
**Partial weight loading**
If you have modified your model, for instance by adding a new layer
(with weights) or by changing the shape of the weights of a layer, you
can choose to ignore errors and continue loading by setting
`skip_mismatch=True`. In this case any layer with mismatching weights
will be skipped. A warning will be displayed for each skipped layer.
**Sharding**
When loading sharded weights, it is important to specify `filepath` that
ends with `*.weights.json` which is used as the configuration file.
Additionally, the sharded files `*_xxxxx.weights.h5` must be in the same
directory as the configuration file.
Args:
filepath: `str` or `pathlib.Path` object. Path where the weights
will be saved. When sharding, the filepath must end in
`.weights.json`.
skip_mismatch: Boolean, whether to skip loading of layers where
there is a mismatch in the number of weights, or a mismatch in
the shape of the weights.
Example:
```python
# Load the weights in a single file.
model.load_weights("model.weights.h5")
# Load the weights in sharded files.
model.load_weights("model.weights.json")
```
"""
saving_api.load_weights(
self,
filepath,
skip_mismatch=skip_mismatch,
**kwargs,
)
def get_quantization_layer_structure(self, mode=None):
"""Returns the quantization structure for the model.
This method is intended to be overridden by model authors to provide
topology information required for structure-aware quantization modes
like 'gptq'.
Args:
mode: The quantization mode.
Returns:
A dictionary describing the topology, e.g.:
`{'pre_block_layers': [list], 'sequential_blocks': [list]}`
or `None` if the mode does not require structure or is not
supported. `'pre_block_layers'` is a list of layers that
the inputs should be passed through, before being passed to
the sequential blocks. For example, inputs to an LLM must
first be passed through an embedding layer, followed by
the transformer.
"""
del mode # Unused.
return None
def quantize(self, mode=None, config=None, filters=None, **kwargs):
"""Quantize the weights of the model.
Note that the model must be built first before calling this method.
`quantize` will recursively call `quantize(...)` in all layers and
will be skipped if the layer doesn't implement the function.
This method can be called by passing a `mode` string, which uses the
default configuration for that mode. Alternatively, a `config` object
can be passed to customize the behavior of the quantization (e.g. to
use specific quantizers for weights or activations).
Args:
mode: The mode of the quantization. Supported modes are:
`"int8"`, `"int4"`, `"float8"`, `"gptq"`. This is
optional if `config` is provided.
config: The configuration object specifying additional
quantization options. This argument allows to configure
the weight and activation quantizers. be an instance of
`keras.quantizers.QuantizationConfig`.
filters: Optional filters to apply to the quantization. Can be a
regex string, a list of regex strings, or a callable. Only the
layers which match the filter conditions will be quantized.
**kwargs: Additional keyword arguments.
Example:
Quantize a model to int8 with default configuration:
```python
# Build the model
model = keras.Sequential([
keras.Input(shape=(10,)),
keras.layers.Dense(10),
])
model.build((None, 10))
# Quantize with default int8 config
model.quantize("int8")
```
Quantize a model to int8 with a custom configuration:
```python
from keras.quantizers import Int8QuantizationConfig
from keras.quantizers import AbsMaxQuantizer
# Build the model
model = keras.Sequential([
keras.Input(shape=(10,)),
keras.layers.Dense(10),
])
model.build((None, 10))
# Create a custom config
config = Int8QuantizationConfig(
weight_quantizer=AbsMaxQuantizer(
axis=0,
value_range=(-127, 127)
),
activation_quantizer=AbsMaxQuantizer(
axis=-1,
value_range=(-127, 127)
),
)
# Quantize with custom config
model.quantize(config=config)
```
"""
# Validate inputs.
type_check = kwargs.pop("type_check", True)
if kwargs:
raise ValueError(
"Unrecognized keyword arguments "
f"passed to {self.__class__.__name__}: {kwargs}"
)
if filters is not None:
if not isinstance(filters, (str, Callable, list, tuple)):
raise ValueError(
"The `filters` argument must be a regex string, a list of "
"regex strings, or a callable. Received: "
f"{type(filters)}"
)
graph_modified = False
for layer in self._flatten_layers():
# Apply filters
if not should_quantize_layer(layer, filters):
continue
if len(list(layer._flatten_layers())) == 1:
try:
layer.quantize(mode, type_check=type_check, config=config)
graph_modified = True
except NotImplementedError as e:
warnings.warn(str(e))
except AttributeError:
pass
if mode == "gptq":
# Resolve model structure.
# 1. If quantization_layer_structure is provided inside the config,
# use that.
structure = config.quantization_layer_structure
# 2. If no layer structure is provided in the config, try to fetch
# it using the `get_quantization_layer_structure` hook.
if structure is None:
structure = self.get_quantization_layer_structure(mode)
if structure is None:
raise ValueError(
"For 'gptq' mode, a valid quantization structure must be "
"provided either via `config.quantization_layer_structure` "
"or by overriding "
"`model.get_quantization_layer_structure(mode)`. The "
"structure should be a dictionary with keys "
"'pre_block_layers' and 'sequential_blocks'."
)
gptq_quantize(config, structure, filters=filters)
# If any layer was changed, we must rebuild the execution functions.
if graph_modified:
self.train_function = None
self.test_function = None
self.predict_function = None
self._post_quantize(mode, **kwargs)
def _post_quantize(self, mode, **kwargs):
if backend.backend() == "torch":
# We need to manually retrack `torch_params`.
# The reason is that after quantization, the removed variables are
# still referenced by `torch_params` and cannot be gc.
for layer in self._flatten_layers():
layer._track_variables()
def build_from_config(self, config):
if not config:
return
status = False
if "input_shape" in config:
# Case: all inputs are in the first arg (possibly nested).
if utils.is_default(self.build):
status = self._build_by_run_for_single_pos_arg(
config["input_shape"]
)
else:
try:
self.build(config["input_shape"])
status = True
except:
pass
self._build_shapes_dict = config
elif "shapes_dict" in config:
# Case: inputs were recorded as multiple keyword arguments.
if utils.is_default(self.build):
status = self._build_by_run_for_kwargs(config["shapes_dict"])
else:
try:
self.build(**config["shapes_dict"])
status = True
except:
pass
self._build_shapes_dict = config["shapes_dict"]
if not status:
warnings.warn(
f"Model '{self.name}' had a build config, but the model "
"cannot be built automatically in "
"`build_from_config(config)`. "
"You should implement "
"`def build_from_config(self, config)`, "
"and you might also want to implement the method "
" that generates the config at saving time, "
"`def get_build_config(self)`. "
"The method `build_from_config()` is meant to "
"create the state of the model (i.e. its variables) "
"upon deserialization.",
stacklevel=2,
)
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={...})`.
Args:
**kwargs: Additional keyword arguments to be passed to
`json.dumps()`.
Returns:
A JSON string.
"""
from keras.src.saving import serialization_lib
model_config = serialization_lib.serialize_keras_object(self)
return json.dumps(model_config, **kwargs)
def export(
self,
filepath,
format="tf_saved_model",
verbose=None,
input_signature=None,
**kwargs,
):
"""Export the model as an artifact for inference.
Args:
filepath: `str` or `pathlib.Path` object. The path to save the
artifact.
format: `str`. The export format. Supported values:
`"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"litert"`.
Defaults to `"tf_saved_model"`.
verbose: `bool`. Whether to print a message during export. Defaults
to `None`, which uses the default value set by different
backends and formats.
input_signature: Optional. Specifies the shape and dtype of the
model inputs. Can be a structure of `keras.InputSpec`,
`tf.TensorSpec`, `backend.KerasTensor`, or backend tensor. If
not provided, it will be automatically computed. Defaults to
`None`.
**kwargs: Additional keyword arguments.
- `is_static`: Optional `bool`. Specific to the JAX backend and
`format="tf_saved_model"`. Indicates whether `fn` is static.
Set to `False` if `fn` involves state updates (e.g., RNG
seeds and counters).
- `jax2tf_kwargs`: Optional `dict`. Specific to the JAX backend
and `format="tf_saved_model"`. Arguments for
`jax2tf.convert`. See the documentation for
[`jax2tf.convert`](
https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md).
If `native_serialization` and `polymorphic_shapes` are not
provided, they will be automatically computed.
- `opset_version`: Optional `int`. Specific to `format="onnx"`.
An integer value that specifies the ONNX opset version.
- LiteRT-specific options: Optional keyword arguments specific
to `format="litert"`. These are passed directly to the
TensorFlow Lite converter and include options like
`optimizations`, `representative_dataset`,
`experimental_new_quantizer`, `allow_custom_ops`,
`enable_select_tf_ops`, etc. See TensorFlow Lite
documentation for all available options.
**Note:** This feature is currently supported only with TensorFlow, JAX
and Torch backends.
**Note:** Be aware that the exported artifact may contain information
from the local file system when using `format="onnx"`, `verbose=True`
and Torch backend.
Examples:
Here's how to export a TensorFlow SavedModel for inference.
```python
# Export the model as a TensorFlow SavedModel artifact
model.export("path/to/location", format="tf_saved_model")
# Load the artifact in a different process/environment
reloaded_artifact = tf.saved_model.load("path/to/location")
predictions = reloaded_artifact.serve(input_data)
```
Here's how to export an ONNX for inference.
```python
# Export the model as a ONNX artifact
model.export("path/to/location", format="onnx")
# Load the artifact in a different process/environment
ort_session = onnxruntime.InferenceSession("path/to/location")
ort_inputs = {
k.name: v for k, v in zip(ort_session.get_inputs(), input_data)
}
predictions = ort_session.run(None, ort_inputs)
```
Here's how to export a LiteRT (TFLite) for inference.
```python
# Export the model as a LiteRT artifact
model.export("path/to/location", format="litert")
# Load the artifact in a different process/environment
interpreter = tf.lite.Interpreter(model_path="path/to/location")
interpreter.allocate_tensors()
interpreter.set_tensor(
interpreter.get_input_details()[0]['index'], input_data
)
interpreter.invoke()
output_data = interpreter.get_tensor(
interpreter.get_output_details()[0]['index']
)
```
"""
from keras.src.export import export_litert
from keras.src.export import export_onnx
from keras.src.export import export_openvino
from keras.src.export import export_saved_model
available_formats = ("tf_saved_model", "onnx", "openvino", "litert")
if format not in available_formats:
raise ValueError(
f"Unrecognized format={format}. Supported formats are: "
f"{list(available_formats)}."
)
# Check if LiteRT export is available (requires TensorFlow backend)
if format == "litert" and backend.backend() != "tensorflow":
raise ImportError("LiteRT export requires TensorFlow backend.")
if format == "tf_saved_model":
export_saved_model(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
elif format == "onnx":
export_onnx(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
elif format == "openvino":
export_openvino(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
elif format == "litert":
export_litert(
self,
filepath,
input_signature=input_signature,
**kwargs,
)
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.src.models.functional import Functional
functional_config_keys = [
"name",
"layers",
"input_layers",
"output_layers",
]
is_functional_config = all(
key in config for key in functional_config_keys
)
argspec = inspect.getfullargspec(cls.__init__)
functional_init_args = inspect.getfullargspec(Functional.__init__).args[
1:
]
revivable_as_functional = (
cls in {Functional, Model}
or argspec.args[1:] == functional_init_args
or (argspec.varargs == "args" and argspec.varkw == "kwargs")
)
if is_functional_config and revivable_as_functional:
# Revive Functional model
# (but not Functional subclasses with a custom __init__)
from keras.src.models.functional import functional_from_config
return functional_from_config(
cls, config, custom_objects=custom_objects
)
# Either the model has a custom __init__, or the config
# does not contain all the information necessary to
# revive a Functional model. This happens when the user creates
# subclassed models where `get_config()` is returning
# insufficient information to be considered a Functional model.
# In this case, we fall back to provide all config into the
# constructor of the class.
try:
return cls(**config)
except TypeError as e:
raise TypeError(
"Unable to revive model from config. When overriding "
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/model_test.py | keras/src/models/model_test.py | import os
import pickle
from collections import namedtuple
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import losses
from keras.src import testing
from keras.src import tree
from keras.src.layers.core.input_layer import Input
from keras.src.models.functional import Functional
from keras.src.models.model import Model
from keras.src.models.model import model_from_json
def _get_model():
    """Two-input functional model: element-wise add, then two Dense layers."""
    in_a = Input(shape=(3,), batch_size=2, name="input_a")
    in_b = Input(shape=(3,), batch_size=2, name="input_b")
    hidden = layers.Dense(5)(in_a + in_b)
    out = layers.Dense(4)(hidden)
    return Model([in_a, in_b], out)
def _get_model_multi_outputs_list():
    """Functional model with two named heads returned as a list."""
    inp = Input(shape=(3,), name="input_a")
    head_a = layers.Dense(1, name="output_a")(inp)
    head_b = layers.Dense(1, name="output_b", activation="sigmoid")(inp)
    return Model(inp, [head_a, head_b])
def _get_model_multi_outputs_list_no_output_names():
    """Two-output list model whose heads carry no explicit names."""
    inp = Input(shape=(3,), name="input_a")
    head_a = layers.Dense(1)(inp)
    head_b = layers.Dense(1, activation="sigmoid")(inp)
    return Model(inp, [head_a, head_b])
def _get_model_single_output():
    """Model with a single bare output tensor (no list/dict wrapper)."""
    inp = Input(shape=(3,), name="input_a")
    head = layers.Dense(1, name="output_a")(inp)
    return Model(inp, head)
def _get_model_single_output_list():
    """Model with one output wrapped in a single-element list."""
    inp = Input(shape=(3,), name="input_a")
    head = layers.Dense(1, name="output_a")(inp)
    return Model(inp, [head])
def _get_model_single_output_dict():
    """Model with one output wrapped in a single-key dict."""
    inp = Input(shape=(3,), name="input_a")
    head = layers.Dense(1, name="output_a")(inp)
    return Model(inp, {"output_a": head})
def _get_model_multi_outputs_dict():
    """Functional model whose two named heads are returned in a dict."""
    inp = Input(shape=(3,), name="input_a")
    head_a = layers.Dense(1, name="output_a")(inp)
    head_b = layers.Dense(1, name="output_b", activation="sigmoid")(inp)
    return Model(inp, {"output_a": head_a, "output_b": head_b})
def _get_model_multi_outputs_struct_list_like(_type):
    """Model whose two outputs are wrapped in the given sequence type.

    Args:
        _type: a sequence constructor (e.g. `list` or `tuple`) applied to
            the pair of output tensors.
    """
    inp = Input(shape=(3,), name="x")
    first = layers.Dense(1, name="y1", activation="sigmoid")(inp)
    second = layers.Dense(1, name="y2", activation="sigmoid")(inp)
    return Model(inp, _type([first, second]))
def _get_model_multi_outputs_struct_namedtuple():
    """Model whose output structure is a namedtuple; returns (model, type)."""
    Y = namedtuple("Y", ["y1", "y2"])
    inp = Input(shape=(3,), name="x")
    first = layers.Dense(1, name="y1", activation="sigmoid")(inp)
    second = layers.Dense(1, name="y2", activation="sigmoid")(inp)
    return Model(inp, Y(first, second)), Y
def _get_model_multi_outputs_struct_dict():
    """Model whose outputs live under dict keys that differ from layer names."""
    inp = Input(shape=(3,), name="x")
    first = layers.Dense(1, name="y1", activation="sigmoid")(inp)
    second = layers.Dense(1, name="y2", activation="sigmoid")(inp)
    return Model(inp, {"a": first, "b": second})
def _get_model_multi_outputs_struct():
    """Model with a deeply nested output structure (tuples inside dicts)."""
    inp = Input(shape=(3,), name="x")
    first = layers.Dense(1, name="y1", activation="sigmoid")(inp)
    second = layers.Dense(1, name="y2", activation="sigmoid")(inp)
    third = layers.Dense(1, name="y3", activation="sigmoid")(inp)
    # Note: the same tensors are deliberately reused in several branches.
    structured_outputs = {
        "a": (first, second),
        "b": {"b1": first, "b2": second},
        "c": {"c1": (first, second), "c2": second},
        "d": third,
    }
    return Model(inp, structured_outputs)
def _get_model_multi_outputs_dict_with_single_tensor():
    """Model whose dict maps two keys to the very same output tensor."""
    inp = Input(shape=(3,), name="input_a")
    shared = layers.Dense(1, name="output_a")(inp)
    return Model(inp, {"output_a": shared, "output_b": shared})
def _get_model_with_custom_compute_loss():
    """Build a single-output model whose `compute_loss` fans the one
    prediction out into a two-element list before delegating to the base
    implementation. Used to test per-loss history keys."""

    class MyModel(Model):
        def __init__(self):
            inputs = Input(shape=(3,), name="inputs")
            outputs = layers.Dense(1, name="a")(inputs)
            super().__init__(inputs=inputs, outputs=outputs)

        def compute_loss(self, x, y, y_pred, sample_weight=None, **kwargs):
            y_pred = [y_pred, y_pred]  # To list
            return super().compute_loss(
                x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, **kwargs
            )

    model = MyModel()
    return model
def _get_model_with_duplicate_variable_path():
    """Build (and call once, to create weights) a subclassed model where two
    sibling Dense layers deliberately share the name "layer1", producing
    duplicate variable paths."""

    class MyModel(Model):
        def __init__(self):
            super().__init__()
            # The duplicate "layer1" name below is intentional: it forces
            # two variables to end up with the same `.path`.
            self.dense1 = layers.Dense(4, activation="relu", name="layer1")
            self.dense2 = layers.Dense(4, activation="relu", name="layer1")
            self.dense3 = layers.Dense(2)

        def call(self, x):
            x = self.dense1(x)
            x = self.dense2(x)
            return self.dense3(x)

    model = MyModel()
    x = np.random.random((1, 16))
    # Call once so the layers build and the duplicate paths exist.
    model(x)
    return model
def _get_model_optional_inputs():
    """Build a functional model taking a dict of inputs where key "o" is
    declared `optional=True`; the custom layer adds it to "x" only when it
    is provided."""

    class OptionalInputLayer(layers.Layer):
        def __init__(self):
            super().__init__()
            self.dense = layers.Dense(2)

        def call(self, x, o=None):
            # `o` is None when the optional input is omitted at call time.
            z = x if o is None else x + o
            return self.dense(z)

    x = Input((2,), name="x")
    o = Input((2,), name="o", optional=True)
    y = OptionalInputLayer()(x, o)
    model = Model({"x": x, "o": o}, y)
    return model
def _get_variable_value_by_path(variables, path):
for v in variables:
if v.path == path:
return v.value
raise ValueError(f"No variable was find with path = {path}")
@pytest.mark.requires_trainable_backend
class ModelTest(testing.TestCase):
    def test_functional_rerouting(self):
        """`_get_model`'s graph of KerasTensors builds as a Functional."""
        model = _get_model()
        self.assertIsInstance(model, Functional)
    def test_json_serialization(self):
        """`to_json` -> `model_from_json` round-trips to identical JSON."""
        model = _get_model()
        json_string = model.to_json()
        new_model = model_from_json(json_string)
        self.assertEqual(json_string, new_model.to_json())
    def test_tuple_input_model_subclass(self):
        """A subclassed model can unpack a tuple of inputs in `call`."""
        # https://github.com/keras-team/keras/issues/324

        class MultiInputModel(Model):
            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                self.dense1 = layers.Dense(4)

            def call(self, inputs):
                a, b = inputs
                r = self.dense1(a)
                return layers.concatenate([r, b])

        model = MultiInputModel()
        x1 = np.random.rand(3, 3)
        x2 = np.random.rand(3, 2)
        out = model((x1, x2))
        # 4 dense units concatenated with the 2 features of x2.
        self.assertEqual(out.shape, (3, 6))
    def test_reviving_functional_from_config_custom_layer(self):
        """`Model.from_config` revives a Functional that uses a custom layer
        when the layer is supplied via `custom_objects`."""

        class CustomDense(layers.Layer):
            def __init__(self, units, **kwargs):
                super().__init__(**kwargs)
                self.dense = layers.Dense(units)

            def call(self, x):
                return self.dense(x)

        inputs = layers.Input((4,))
        outputs = CustomDense(10)(inputs)
        model = Model(inputs, outputs)
        config = model.get_config()
        new_model = Model.from_config(
            config, custom_objects={"CustomDense": CustomDense}
        )
        self.assertIsInstance(new_model, Functional)
    def test_reviving_functional_from_config_custom_model(self):
        """A Functional subclass with an extra constructor kwarg survives a
        `get_config` -> `from_config` round trip."""

        class CustomModel(Model):
            def __init__(self, *args, param=1, **kwargs):
                super().__init__(*args, **kwargs)
                self.param = param

            def get_config(self):
                base_config = super().get_config()
                config = {"param": self.param}
                return base_config | config

        inputs = layers.Input((3,))
        outputs = layers.Dense(5)(inputs)
        model = CustomModel(inputs=inputs, outputs=outputs, param=3)

        new_model = CustomModel.from_config(model.get_config())
        self.assertEqual(new_model.param, 3)
    @parameterized.named_parameters(
        ("single_output_1", _get_model_single_output),
        ("single_output_2", _get_model_single_output),
        ("single_output_3", _get_model_single_output),
        ("single_output_4", _get_model_single_output),
        ("single_list_output_1", _get_model_single_output_list),
        ("single_list_output_2", _get_model_single_output_list),
        ("single_list_output_3", _get_model_single_output_list),
        ("single_list_output_4", _get_model_single_output_list),
    )
    def test_functional_pickling(self, model_fn):
        """A compiled Functional pickles and predicts identically after
        `pickle.loads(pickle.dumps(...))`."""
        model = model_fn()
        self.assertIsInstance(model, Functional)
        model.compile()
        x = np.random.rand(8, 3)

        reloaded_pickle = pickle.loads(pickle.dumps(model))

        pred_reloaded = reloaded_pickle.predict(x)
        pred = model.predict(x)

        self.assertAllClose(np.array(pred_reloaded), np.array(pred))
    @parameterized.named_parameters(
        ("single_output_1", _get_model_single_output, None),
        ("single_output_2", _get_model_single_output, "list"),
        ("single_output_3", _get_model_single_output, "dict"),
        ("single_output_4", _get_model_single_output, "dict_list"),
        ("single_list_output_1", _get_model_single_output_list, None),
        ("single_list_output_2", _get_model_single_output_list, "list"),
        ("single_list_output_3", _get_model_single_output_list, "dict"),
        ("single_list_output_4", _get_model_single_output_list, "dict_list"),
        ("single_dict_output_1", _get_model_single_output_dict, None),
        ("single_dict_output_2", _get_model_single_output_dict, "list"),
        ("single_dict_output_3", _get_model_single_output_dict, "dict"),
        ("single_dict_output_4", _get_model_single_output_dict, "dict_list"),
    )
    def test_functional_single_output(self, model_fn, loss_type):
        """Single-output models accept scalar/list/dict loss specs and emit
        the expected metric history keys."""
        model = model_fn()
        self.assertIsInstance(model, Functional)
        loss = "mean_squared_error"
        if loss_type == "list":
            loss = [loss]
        elif loss_type == "dict":
            loss = {"output_a": loss}
        elif loss_type == "dict_list":
            loss = {"output_a": [loss]}
        model.compile(
            optimizer="sgd",
            loss=loss,
            metrics={
                "output_a": ["mean_squared_error", "mean_absolute_error"],
            },
            weighted_metrics={
                "output_a": "mean_squared_error",
            },
        )
        # Fit the model to make sure compile_metrics are built
        x = np.random.rand(8, 3)
        y = np.random.rand(8, 1)
        hist = model.fit(
            x,
            y,
            batch_size=2,
            epochs=1,
            verbose=0,
        )
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "mean_absolute_error",
                "mean_squared_error",
                "weighted_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_list_losses(self):
        """Per-output losses/metrics given as lists map positionally onto
        the model's list outputs and prefix history keys with output names."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss=["mean_squared_error", "binary_crossentropy"],
            metrics=[
                "mean_squared_error",
                ["mean_squared_error", "accuracy"],
            ],
            loss_weights=[0.1, 2],
        )
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_mean_squared_error",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_list_losses_abbr(self):
        """Abbreviated loss/metric names ("mse", "bce", ...) are preserved
        verbatim in the history keys."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss=["mse", "bce"],
            metrics=[
                ["bce", "mse", "mae"],
                ["mse", "acc"],
            ],
            loss_weights=[0.1, 2],
        )
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_bce",
                "output_a_mae",
                "output_a_mse",
                "output_b_acc",
                "output_b_loss",
                "output_b_mse",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_nested_list_losses(self):
        """A single-element nested list loss behaves like the bare loss."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss=["mean_squared_error", ["binary_crossentropy"]],
            metrics=[
                "mean_squared_error",
                ["mean_squared_error", "accuracy"],
            ],
            loss_weights=[0.1, 2],
        )
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_mean_squared_error",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_dict_outputs_dict_losses(self):
        """Dict-output models: predict returns a dict, and dict-keyed
        losses/metrics/weighted_metrics produce per-output history keys."""
        model = _get_model_multi_outputs_dict()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": ["binary_crossentropy"],
            },
            metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["mean_squared_error", "accuracy"],
            },
            weighted_metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["mean_squared_error", "accuracy"],
            },
        )
        # Check dict outputs.
        outputs = model.predict(x)
        self.assertIsInstance(outputs, dict)
        self.assertEqual(outputs["output_a"].shape, (8, 1))
        self.assertEqual(outputs["output_b"].shape, (8, 1))
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(
            x,
            {"output_a": y1, "output_b": y2},
            batch_size=2,
            epochs=1,
            verbose=0,
        )
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_mean_squared_error",
                "output_a_weighted_mean_squared_error",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
                "output_b_weighted_accuracy",
                "output_b_weighted_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_dict_outputs_dict_losses_with_undefined_loss(self):
        """Outputs omitted from the `loss` dict contribute no history keys
        but still appear in `predict` results."""
        model = _get_model_multi_outputs_dict()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_b": ["binary_crossentropy"],
            },
            metrics={
                "output_b": ["mean_squared_error", "accuracy"],
            },
            weighted_metrics={
                "output_b": ["mean_squared_error", "accuracy"],
            },
        )
        # Check dict outputs.
        outputs = model.predict(x)
        self.assertIsInstance(outputs, dict)
        self.assertEqual(outputs["output_a"].shape, (8, 1))
        self.assertEqual(outputs["output_b"].shape, (8, 1))
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(
            x,
            {"output_a": y1, "output_b": y2},
            batch_size=2,
            epochs=1,
            verbose=0,
        )
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_b_accuracy",
                "output_b_mean_squared_error",
                "output_b_weighted_accuracy",
                "output_b_weighted_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_dict_losses_metrics(self):
        """List-output models accept dict-keyed losses/metrics addressed by
        output layer name; predict still returns a list."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
            metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["mean_squared_error", "accuracy"],
            },
            weighted_metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["mean_squared_error", "accuracy"],
            },
        )
        # Check list outputs.
        outputs = model.predict(x)
        self.assertIsInstance(outputs, list)
        self.assertEqual(outputs[0].shape, (8, 1))
        self.assertEqual(outputs[1].shape, (8, 1))
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_mean_squared_error",
                "output_a_weighted_mean_squared_error",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
                "output_b_weighted_accuracy",
                "output_b_weighted_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_dict_losses_metrics_uniq_weighted(self):
        """A metric appearing only in `weighted_metrics` keeps its plain
        (unprefixed) history key."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
            metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["mean_squared_error"],
            },
            weighted_metrics={
                "output_a": ["mean_squared_error"],
                "output_b": ["accuracy"],
            },
        )
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        # `output_b_accuracy` doesn't have `weighted_` in metric name.
        # When a metric is only in weighted metrics, it skips `weighted_`
        # prefix. This behavior matches `tf.keras`.
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_a_mean_squared_error",
                "output_a_weighted_mean_squared_error",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_dict_losses_partial_metrics(self):
        """Metrics may be specified for only a subset of the outputs."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
            metrics={
                "output_b": ["mean_squared_error", "accuracy"],
            },
        )
        # Fit the model to make sure compile_metrics are built
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            [
                "loss",
                "output_a_loss",
                "output_b_accuracy",
                "output_b_loss",
                "output_b_mean_squared_error",
            ]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_dict_outputs_with_single_tensor(self):
        """Two dict keys sharing one tensor still count as two outputs and
        accept two separate losses."""
        model = _get_model_multi_outputs_dict_with_single_tensor()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))

        # `model` has 2 outputs, but there is actually only 1 output tensor.
        self.assertLen(model.outputs, 2)
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
        )
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(["loss", "output_a_loss", "output_b_loss"])
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_with_custom_compute_loss(self):
        """When `compute_loss` fans one output into two, history keys are
        named after the losses rather than the outputs."""
        model = _get_model_with_custom_compute_loss()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))

        # `model` has 1 output, but in `compute_loss` it is separated into 2.
        self.assertLen(model.outputs, 1)
        model.compile(
            optimizer="sgd", loss=["mean_squared_error", "binary_crossentropy"]
        )
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(
            ["binary_crossentropy_loss", "loss", "mean_squared_error_loss"]
        )
        self.assertListEqual(hist_keys, ref_keys)
    def test_functional_list_outputs_dict_losses_invalid_keys(self):
        """A loss-dict key that matches no output raises at fit time."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_c": "binary_crossentropy",
            },
        )

        # Fit the model to make sure compile_metrics are built
        with self.assertRaisesRegex(
            ValueError,
            "Expected keys",
        ):
            model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
    def test_functional_list_outputs_dict_losses_no_output_names(self):
        """Dict losses cannot address outputs whose layers are unnamed."""
        model = _get_model_multi_outputs_list_no_output_names()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={"output_a": "mean_squared_error"},
        )
        # Fit the model to make sure compile_metrics are built
        with self.assertRaisesRegex(
            ValueError,
            "Expected keys",
        ):
            model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
    def test_functional_list_outputs_dict_metrics_invalid_keys(self):
        """A metrics-dict key that matches no output raises at fit time."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
            metrics={
                "output_c": ["mean_squared_error", "accuracy"],
            },
        )
        # Fit the model to make sure compile_metrics are built
        with self.assertRaisesRegex(
            ValueError,
            "In the dict argument `metrics`, "
            "key 'output_c' does not correspond to any model output",
        ):
            model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
    def test_functional_dict_outputs_dict_losses_invalid_keys(self):
        """For dict outputs, a bad loss key surfaces as a KeyError."""
        model = _get_model_multi_outputs_dict()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_c": "binary_crossentropy",
            },
        )
        # Fit the model to make sure compile_metrics are built
        with self.assertRaisesRegex(
            KeyError,
            "in the `loss` argument, can't be found "
            "in either the model's output",
        ):
            model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
    def test_functional_dict_outputs_dict_metrics_invalid_keys(self):
        """For dict outputs, a bad metrics key raises a ValueError."""
        model = _get_model_multi_outputs_dict()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss={
                "output_a": "mean_squared_error",
                "output_b": "binary_crossentropy",
            },
            metrics={
                "output_c": ["mean_squared_error", "accuracy"],
            },
        )
        # Fit the model to make sure compile_metrics are built
        with self.assertRaisesRegex(
            ValueError,
            "In the dict argument `metrics`, "
            "key 'output_c' does not correspond to any model output",
        ):
            model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
    def test_functional_list_outputs_invalid_nested_list_losses(self):
        """A two-element nested loss list for one output is accepted and
        produces the standard per-output loss keys."""
        model = _get_model_multi_outputs_list()
        self.assertIsInstance(model, Functional)
        x = np.random.rand(8, 3)
        y1 = np.random.rand(8, 1)
        y2 = np.random.randint(0, 2, (8, 1))
        model.compile(
            optimizer="sgd",
            loss=[
                "mean_squared_error",
                ["mean_squared_error", "binary_crossentropy"],
            ],
        )
        hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
        hist_keys = sorted(hist.history.keys())
        ref_keys = sorted(["loss", "output_a_loss", "output_b_loss"])
        self.assertListEqual(hist_keys, ref_keys)
    @parameterized.named_parameters(
        ("int8", "int8"),
        ("float8", "float8"),
    )
    def test_quantize(self, mode):
        """`quantize` switches Dense/EinsumDense dtype policies and creates
        the expected number of variables per mode."""
        model = _get_model()
        x1 = np.random.rand(2, 3)
        x2 = np.random.rand(2, 3)
        model.quantize(mode)
        _ = model((x1, x2))
        for layer in model._flatten_layers():
            if isinstance(layer, (layers.Dense, layers.EinsumDense)):
                self.assertEqual(
                    layer.dtype_policy.name, f"{mode}_from_float32"
                )
                self.assertEqual(layer.dtype_policy.quantization_mode, mode)
        if mode == "int8":
            self.assertLen(model.variables, 6)
            if backend.backend() == "torch":
                self.assertLen(list(model.named_parameters()), 6)
        elif mode == "float8":
            self.assertLen(model.variables, 16)
            if backend.backend() == "torch":
                self.assertLen(list(model.named_parameters()), 16)
    @parameterized.named_parameters(
        ("regex_string", "dense_1", ["dense_1"]),
        ("list_of_regex", ["dense_1", "output"], ["dense_1", "output"]),
        ("callable", lambda l: "dense" in l.name, ["dense_1", "dense_2"]),
    )
    def test_quantize_with_filters(self, filters, expected_quantized_layers):
        """`quantize(filters=...)` (regex, list of regexes, or callable)
        quantizes only the matching layers."""
        mode = "int8"
        inputs = layers.Input([3])
        x = layers.Dense(32, name="dense_1")(inputs)
        x = layers.Dense(32, name="dense_2")(x)
        outputs = layers.Dense(32, name="output")(x)
        model = Model(inputs, outputs)
        model.quantize(mode, filters=filters)
        for layer in model._flatten_layers():
            if layer.name in expected_quantized_layers:
                self.assertEqual(
                    layer.dtype_policy.name, f"{mode}_from_float32"
                )
            elif isinstance(layer, layers.Dense):
                self.assertNotEqual(
                    layer.dtype_policy.name, f"{mode}_from_float32"
                )
    @parameterized.named_parameters(
        ("int8", "int8"),
        ("float8", "float8"),
    )
    def test_quantize_unbuilt(self, mode):
        """Quantizing before the model is built raises; after one forward
        pass (which builds it) it succeeds."""

        class MyModel(Model):
            def __init__(self):
                super().__init__()
                self.dense1 = layers.Dense(32, activation="relu")
                self.dense2 = layers.Dense(5, activation="softmax")
                self.dropout = layers.Dropout(0.5)

            def call(self, inputs, training=False):
                x = self.dense1(inputs)
                x = self.dropout(x, training=training)
                return self.dense2(x)

        model = MyModel()
        with self.assertRaisesRegex(
            ValueError, "Cannot quantize a layer that isn't yet built."
        ):
            model.quantize(mode)

        x = np.random.rand(2, 3)
        _ = model(x)
        model.quantize(mode)
    def test_quantize_invalid_args(self):
        """Unknown modes and unrecognized kwargs are rejected."""
        model = _get_model()
        with self.assertRaisesRegex(
            ValueError, "Invalid quantization mode. Expected one of"
        ):
            model.quantize("abc")

        with self.assertRaisesRegex(
            ValueError, "Unrecognized keyword arguments"
        ):
            model.quantize("int8", unrecognized_kwargs=None)

        with self.assertRaisesRegex(ValueError, "Invalid quantization mode"):
            model.quantize("int7")
    @parameterized.named_parameters(
        ("int8", "int8"),
        ("float8", "float8"),
    )
    def test_quantize_nested_model(self, mode):
        """Quantization recurses through layers nested two levels deep."""

        class NestedLayer(layers.Layer):
            def __init__(self, units):
                super().__init__()
                self.dense = layers.Dense(units)

            def call(self, x):
                x = self.dense(x)
                return x

        class DoubleNestedLayer(layers.Layer):
            def __init__(self, units):
                super().__init__()
                self.nested_dense1 = NestedLayer(units)
                self.nested_dense2 = NestedLayer(units)
                self.dense = layers.Dense(units)

            def call(self, x):
                x = self.nested_dense1(x)
                x = self.nested_dense2(x)
                x = self.dense(x)
                return x

        inputs = layers.Input([3])
        outputs = DoubleNestedLayer(8)(inputs)
        model = Model(inputs, outputs)
        model.quantize(mode)

        if mode == "int8":
            kernel_count = 0
            for weight in model.weights:
                if weight.name == "kernel":
                    kernel_count += 1
                    self.assertEqual(
                        backend.standardize_dtype(weight.dtype), "int8"
                    )
            self.assertEqual(kernel_count, 3)
        if mode == "float8":
            # kernel + bias + scale * 3 + amax_history * 3 == 8
            self.assertEqual(len(model.weights), 3 * 8)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/variable_mapping.py | keras/src/models/variable_mapping.py | from keras.src.layers.layer import Layer
from keras.src.metrics.metric import Metric
from keras.src.optimizers.optimizer import Optimizer
from keras.src.saving import saving_lib
from keras.src.saving.keras_saveable import KerasSaveable
def map_saveable_variables(saveable, store, visited_saveables):
    """Recursively collect all variables of `saveable` into `store`.

    Args:
        saveable: a `KerasSaveable` (layer, optimizer, metric, ...) whose
            variables are mapped by their `.path`.
        store: dict mapping variable path -> variable; mutated in place.
        visited_saveables: set of `id()`s of saveables already processed,
            used to break cycles and avoid double-visiting shared objects.

    Raises:
        ValueError: if two variables share the same path, since the
            resulting map would silently drop one of them.
    """
    # If the saveable has already been seen, skip it.
    if id(saveable) in visited_saveables:
        return

    visited_saveables.add(id(saveable))

    variables = []
    if isinstance(saveable, Layer):
        variables = (
            saveable._trainable_variables + saveable._non_trainable_variables
        )
    elif isinstance(saveable, Optimizer):
        variables = saveable._variables
    elif isinstance(saveable, Metric):
        variables = saveable._variables
    for v in variables:
        if v.path in store:
            raise ValueError(
                "The model contains two variables with a duplicate path: "
                f"path='{v.path}' appears at least twice. "
                f"This path is used for {v} and for {store[v.path]}. "
                "In order to get a variable map, make sure to use "
                "unique paths/names for each variable."
            )
        store[v.path] = v

    # Recursively save state of children saveables (layers, optimizers, etc.)
    for child_attr, child_obj in saving_lib._walk_saveable(saveable):
        if isinstance(child_obj, KerasSaveable):
            map_saveable_variables(
                child_obj,
                store,
                visited_saveables=visited_saveables,
            )
        elif isinstance(child_obj, (list, dict, tuple, set)):
            map_container_variables(
                child_obj,
                store,
                visited_saveables=visited_saveables,
            )
def map_container_variables(container, store, visited_saveables):
    """Map variables of every `KerasSaveable` held in `container`.

    Args:
        container: a list, tuple, set, or dict (dict values are scanned).
        store: dict mapping variable path -> variable; mutated in place.
        visited_saveables: set of `id()`s of saveables already processed.
    """
    items = container.values() if isinstance(container, dict) else container
    for item in items:
        if isinstance(item, KerasSaveable):
            map_saveable_variables(
                item,
                store,
                visited_saveables=visited_saveables,
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/__init__.py | keras/src/models/__init__.py | from keras.src.models.functional import Functional
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/sequential.py | keras/src/models/sequential.py | import copy
import inspect
import typing
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.backend.common import standardize_shape
from keras.src.layers.core.input_layer import InputLayer
from keras.src.layers.layer import Layer
from keras.src.legacy.saving import saving_utils
from keras.src.legacy.saving import serialization as legacy_serialization
from keras.src.models.functional import Functional
from keras.src.models.model import Model
from keras.src.saving import serialization_lib
@keras_export(["keras.Sequential", "keras.models.Sequential"])
class Sequential(Model):
"""`Sequential` groups a linear stack of layers into a `Model`.
Examples:
```python
model = keras.Sequential()
model.add(keras.Input(shape=(16,)))
model.add(keras.layers.Dense(8))
# Note that you can also omit the initial `Input`.
# In that case the model doesn't have any weights until the first call
# to a training/evaluation method (since it isn't yet built):
model = keras.Sequential()
model.add(keras.layers.Dense(8))
model.add(keras.layers.Dense(4))
# model.weights not created yet
# Whereas if you specify an `Input`, the model gets built
# continuously as you are adding layers:
model = keras.Sequential()
model.add(keras.Input(shape=(16,)))
model.add(keras.layers.Dense(8))
len(model.weights) # Returns "2"
# When using the delayed-build pattern (no input shape specified), you can
# choose to manually build your model by calling
# `build(batch_input_shape)`:
model = keras.Sequential()
model.add(keras.layers.Dense(8))
model.add(keras.layers.Dense(4))
model.build((None, 16))
len(model.weights) # Returns "4"
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit`, `eval`, or `predict`,
# or the first time you call the model on some input data.
model = keras.Sequential()
model.add(keras.layers.Dense(8))
model.add(keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
    def __new__(cls, *args, **kwargs):
        # Cast the instance returned by the base `__new__` so static type
        # checkers see it as `cls` (the concrete subclass being built).
        return typing.cast(cls, super().__new__(cls))
def __init__(self, layers=None, trainable=True, name=None):
super().__init__(trainable=trainable, name=name)
self._functional = None
self._layers = []
if layers:
for layer in layers:
self.add(layer, rebuild=False)
self._maybe_rebuild()
def add(self, layer, rebuild=True):
"""Adds a layer instance on top of the layer stack.
Args:
layer: layer instance.
"""
# Legacy case: if the first layer has an input_shape arg,
# use it to build an InputLayer.
if not self._layers:
if getattr(layer, "_input_shape_arg", None) is not None:
self.add(InputLayer(shape=layer._input_shape_arg))
# If we are passed a Keras tensor created by keras.Input(), we
# extract the input layer from its keras history and use that.
if hasattr(layer, "_keras_history"):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, InputLayer):
layer = origin_layer
if not isinstance(layer, Layer):
raise ValueError(
"Only instances of `keras.Layer` can be "
f"added to a Sequential model. Received: {layer} "
f"(of type {type(layer)})"
)
if not self._is_layer_name_unique(layer):
raise ValueError(
"All layers added to a Sequential model "
f"should have unique names. Name '{layer.name}' is already "
"the name of a layer in this model. Update the `name` argument "
"to pass a unique name."
)
if (
isinstance(layer, InputLayer)
and self._layers
and isinstance(self._layers[0], InputLayer)
):
raise ValueError(
f"Sequential model '{self.name}' has already been configured "
f"to use input shape {self._layers[0].batch_shape}. You cannot "
f"add a different Input layer to it."
)
self._layers.append(layer)
if rebuild:
self._maybe_rebuild()
else:
self.built = False
self._functional = None
def pop(self, rebuild=True):
"""Removes the last layer in the model.
Args:
rebuild: `bool`. Whether to rebuild the model after removing
the layer. Defaults to `True`.
Returns:
layer: layer instance.
"""
layer = self._layers.pop()
self.built = False
self._functional = None
if rebuild:
self._maybe_rebuild()
return layer
def _maybe_rebuild(self):
self.built = False
self._functional = None
if isinstance(self._layers[0], InputLayer) and len(self._layers) > 1:
input_shape = self._layers[0].batch_shape
self.build(input_shape)
elif hasattr(self._layers[0], "input_shape") and len(self._layers) > 1:
# We can build the Sequential model if the first layer has the
# `input_shape` property. This is most commonly found in Functional
# model.
input_shape = self._layers[0].input_shape
self.build(input_shape)
def _lock_state(self):
# Unlike other layers, Sequential is mutable after build.
pass
def _obj_type(self):
return "Sequential"
def build(self, input_shape=None):
try:
input_shape = standardize_shape(input_shape)
except:
# Do not attempt to build if the model does not have a single
# input tensor.
return
if not self._layers:
raise ValueError(
f"Sequential model {self.name} cannot be built because it has "
"no layers. Call `model.add(layer)`."
)
if isinstance(self._layers[0], InputLayer):
if self._layers[0].batch_shape != input_shape:
raise ValueError(
f"Sequential model '{self.name}' has already been "
"configured to use input shape "
f"{self._layers[0].batch_shape}. You cannot build it "
f"with input_shape {input_shape}"
)
else:
dtype = self._layers[0].compute_dtype
self._layers = [
InputLayer(batch_shape=input_shape, dtype=dtype)
] + self._layers
# Build functional model
inputs = self._layers[0].output
x = inputs
for layer in self._layers[1:]:
try:
x = layer(x)
except NotImplementedError:
# Can happen if shape inference is not implemented.
# TODO: consider reverting inbound nodes on layers processed.
return
except TypeError as e:
signature = inspect.signature(layer.call)
positional_args = [
param
for param in signature.parameters.values()
if param.default == inspect.Parameter.empty
]
if len(positional_args) != 1:
raise ValueError(
"Layers added to a Sequential model "
"can only have a single positional argument, "
f"the input tensor. Layer {layer.__class__.__name__} "
f"has multiple positional arguments: {positional_args}"
)
raise e
outputs = x
self._functional = Functional(inputs=inputs, outputs=outputs)
def call(self, inputs, training=None, mask=None, **kwargs):
if self._functional:
return self._functional.call(
inputs, training=training, mask=mask, **kwargs
)
# Fallback: Just apply the layer sequence.
# This typically happens if `inputs` is a nested struct.
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and
# `outputs` are the outputs of `layer` applied to `inputs`. At the
# end of each iteration `inputs` is set to `outputs` to prepare for
# the next layer.
layer_kwargs = {
k: kwargs[k]
# only inject if this layer’s signature actually has that arg
for k in getattr(layer, "_call_has_context_arg", {})
if k in kwargs
}
if layer._call_has_mask_arg:
layer_kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
layer_kwargs["training"] = training
outputs = layer(inputs, **layer_kwargs)
inputs = outputs
mask = tree.map_structure(backend.get_keras_mask, outputs)
return outputs
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
layers = self._layers
if layers and isinstance(layers[0], InputLayer):
return layers[1:]
return layers[:]
@layers.setter
def layers(self, _):
raise AttributeError(
"`Sequential.layers` attribute is reserved and should not be used. "
"Use `add()` and `pop()` to change the layers in this model."
)
def compute_output_spec(self, inputs, training=None, mask=None, **kwargs):
if self._functional:
return self._functional.compute_output_spec(
inputs, training=training, mask=mask, **kwargs
)
# Direct application
for layer in self.layers:
outputs = layer.compute_output_spec(
inputs,
training=training,
**kwargs,
) # Ignore mask
inputs = outputs
return outputs
def compute_output_shape(self, input_shape):
if self._functional:
return self._functional.compute_output_shape(input_shape)
# Direct application
for layer in self.layers:
output_shape = layer.compute_output_shape(input_shape)
input_shape = output_shape
return output_shape
@property
def input_shape(self):
if self._functional:
return self._functional.input_shape
raise AttributeError(
f"Sequential model '{self.name}' has no defined input shape yet."
)
@property
def output_shape(self):
if self._functional:
return self._functional.output_shape
raise AttributeError(
f"Sequential model '{self.name}' has no defined output shape yet."
)
@property
def inputs(self):
if self._functional:
return self._functional.inputs
raise AttributeError(
f"Sequential model '{self.name}' has no defined inputs yet."
)
@property
def outputs(self):
if self._functional:
return self._functional.outputs
raise AttributeError(
f"Sequential model '{self.name}' has no defined outputs yet."
)
@property
def input_dtype(self):
# Sequential.__call__ will try to convert its inputs
# to the dtype expected by its input layer, if any.
layers = self._layers
if layers and isinstance(layers[0], InputLayer):
return layers[0].dtype
return super().input_dtype
def _is_layer_name_unique(self, layer):
for ref_layer in self._layers:
if layer.name == ref_layer.name and ref_layer is not layer:
return False
return True
def get_config(self):
serialize_fn = serialization_lib.serialize_keras_object
if global_state.get_global_attribute("use_legacy_config", False):
# Legacy format serialization used for H5 and SavedModel formats
serialize_fn = legacy_serialization.serialize_keras_object
layer_configs = []
for layer in super().layers:
# `super().layers` include the InputLayer if available (it is
# filtered out of `self.layers`).
layer_configs.append(serialize_fn(layer))
config = Model.get_config(self)
config["name"] = self.name
config["layers"] = copy.deepcopy(layer_configs)
if self._functional is not None:
config["build_input_shape"] = self._layers[0].batch_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if "name" in config:
name = config["name"]
build_input_shape = config.get("build_input_shape")
layer_configs = config["layers"]
else:
name = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
if "module" not in layer_config:
# Legacy format deserialization (no "module" key)
# used for H5 and SavedModel formats
layer = saving_utils.model_from_config(
layer_config,
custom_objects=custom_objects,
)
else:
layer = serialization_lib.deserialize_keras_object(
layer_config,
custom_objects=custom_objects,
)
model.add(layer)
if (
not model._functional
and "build_input_shape" in locals()
and build_input_shape
and isinstance(build_input_shape, (tuple, list))
):
model.build(build_input_shape)
return model
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/models/sequential_test.py | keras/src/models/sequential_test.py | import pickle
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import saving
from keras.src import testing
from keras.src.layers.core.input_layer import Input
from keras.src.models.functional import Functional
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
@pytest.mark.requires_trainable_backend
class SequentialTest(testing.TestCase):
    """Unit tests for `keras.Sequential` construction, build, and I/O."""
    def test_basic_flow_with_input(self):
        # Explicit Input: the model is built layer-by-layer as layers are
        # added, so weights exist before the first call.
        model = Sequential(name="seq")
        model.add(Input(shape=(2,), batch_size=3))
        model.add(layers.Dense(4))
        model.add(layers.Dense(5))
        model.summary()
        self.assertEqual(len(model.layers), 2)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)
        # Test eager call
        x = np.random.random((3, 2))
        y = model(x)
        self.assertEqual(type(model._functional), Functional)
        self.assertEqual(y.shape, (3, 5))
        # Test symbolic call
        x = backend.KerasTensor((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 5))
        # Test `layers` constructor arg
        model = Sequential(
            layers=[
                Input(shape=(2,), batch_size=3),
                layers.Dense(4),
                layers.Dense(5),
            ]
        )
        self.assertEqual(len(model.layers), 2)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)
        x = np.random.random((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 5))
        # Test pop
        model.pop()
        self.assertEqual(len(model.layers), 1)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2)
        x = np.random.random((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 4))
    def test_legacy_flow_with_input_shape(self):
        # Legacy `input_shape` kwarg on the first layer implicitly creates
        # an InputLayer, so the model builds eagerly.
        model = Sequential(name="seq")
        model.add(layers.Dense(4, input_shape=(2,)))
        model.add(layers.Dense(5))
        self.assertEqual(len(model.layers), 2)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)
        self.assertEqual(type(model._functional), Functional)
        # Input_dim works too
        model = Sequential(name="seq")
        model.add(layers.Dense(4, input_dim=2))
        model.add(layers.Dense(5))
        self.assertEqual(len(model.layers), 2)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)
        self.assertEqual(type(model._functional), Functional)
        # Subsequent input_shapes are ignored
        model = Sequential(name="seq")
        model.add(layers.Dense(4, input_shape=(2,)))
        model.add(layers.Dense(5, input_shape=(3, 4)))
        self.assertEqual(len(model.layers), 2)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)
        self.assertEqual(type(model._functional), Functional)
    def test_basic_flow_deferred(self):
        # Deferred build: no Input provided, so the model builds on first
        # call with concrete data.
        model = Sequential(name="seq")
        model.add(layers.Dense(4))
        model.add(layers.Dense(5))
        model.summary()
        self.assertEqual(len(model.layers), 2)
        # Test eager call
        x = np.random.random((3, 2))
        y = model(x)
        self.assertTrue(model.built)
        model.summary()
        self.assertEqual(type(model._functional), Functional)
        self.assertEqual(y.shape, (3, 5))
        # Test symbolic call
        x = backend.KerasTensor((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 5))
        # Test `layers` constructor arg
        model = Sequential(
            layers=[
                layers.Dense(4),
                layers.Dense(5),
            ]
        )
        x = np.random.random((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 5))
        # Test pop
        model.pop()
        self.assertEqual(len(model.layers), 1)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2)
        x = np.random.random((3, 2))
        y = model(x)
        self.assertEqual(y.shape, (3, 4))
    def test_basic_flow_as_a_submodel(self):
        # Build submodel
        submodel = Sequential()
        submodel.add(layers.Flatten())
        self.assertFalse(submodel.built)
        inputs = Input((None, 4))
        outputs = layers.TimeDistributed(submodel)(inputs)
        model = Model(inputs=inputs, outputs=outputs)
        x = np.random.random((2, 3, 4))
        y = model(x)
        self.assertEqual(y.shape, (2, 3, 4))
    def test_basic_flow_with_functional_model_as_first_layer(self):
        # A built Functional model as first layer exposes `input_shape`,
        # which lets Sequential build immediately.
        # Build functional model
        inputs = Input((16, 16, 3))
        outputs = layers.Conv2D(4, 3, padding="same")(inputs)
        functional_model = Model(inputs=inputs, outputs=outputs)
        model = Sequential(
            [functional_model, layers.Flatten(), layers.Dense(1)]
        )
        model.summary()
        self.assertEqual(len(model.layers), 3)
        self.assertTrue(model.built)
        for layer in model.layers:
            self.assertTrue(layer.built)
        # Test eager call
        x = np.random.random((1, 16, 16, 3))
        y = model(x)
        self.assertEqual(type(model._functional), Functional)
        self.assertEqual(tuple(y.shape), (1, 1))
        # Test symbolic call
        x = backend.KerasTensor((1, 16, 16, 3))
        y = model(x)
        self.assertEqual(y.shape, (1, 1))
    def test_basic_flow_with_sequential_model_as_first_layer(self):
        # Build sequential model
        sequential_model = Sequential(
            [Input((16, 16, 3)), layers.Conv2D(4, 3, padding="same")]
        )
        model = Sequential(
            [sequential_model, layers.Flatten(), layers.Dense(1)]
        )
        model.summary()
        self.assertEqual(len(model.layers), 3)
        self.assertTrue(model.built)
        for layer in model.layers:
            self.assertTrue(layer.built)
        # Test eager call
        x = np.random.random((1, 16, 16, 3))
        y = model(x)
        self.assertEqual(type(model._functional), Functional)
        self.assertEqual(tuple(y.shape), (1, 1))
        # Test symbolic call
        x = backend.KerasTensor((1, 16, 16, 3))
        y = model(x)
        self.assertEqual(y.shape, (1, 1))
    def test_dict_inputs(self):
        # Nested (dict) inputs exercise the non-Functional fallback in
        # `Sequential.call`.
        class DictLayer(layers.Layer):
            def call(self, inputs):
                assert isinstance(inputs, dict)
                return inputs
        model = Sequential([DictLayer()])
        x = {"a": np.random.random((3, 2)), "b": np.random.random((3, 2))}
        y = model(x)
        self.assertEqual(type(y), dict)
        model.summary()
    def test_list_inputs(self):
        class ListLayer(layers.Layer):
            def call(self, inputs):
                assert isinstance(inputs, list)
                return inputs
        model = Sequential([ListLayer()])
        x = [np.random.random((3, 2)), np.random.random((3, 2))]
        y = model(x)
        self.assertEqual(type(y), list)
        model.summary()
    def test_nested_sequential(self):
        # https://github.com/keras-team/keras/issues/20203
        model = Sequential()
        model.add(Input(shape=(16,)))
        Sequential([model])
    def test_errors(self):
        # Trying to pass 2 Inputs
        model = Sequential()
        model.add(Input(shape=(2,), batch_size=3))
        with self.assertRaisesRegex(ValueError, "already been configured"):
            model.add(Input(shape=(2,), batch_size=3))
        with self.assertRaisesRegex(ValueError, "already been configured"):
            model.add(layers.InputLayer(shape=(2,), batch_size=3))
        # Same name 2x
        model = Sequential()
        model.add(layers.Dense(2, name="dense"))
        with self.assertRaisesRegex(ValueError, "should have unique names"):
            model.add(layers.Dense(2, name="dense"))
        # No layers
        model = Sequential()
        x = np.random.random((3, 2))
        with self.assertRaisesRegex(ValueError, "no layers"):
            model(x)
        # Build conflict
        model = Sequential()
        model.add(Input(shape=(2,), batch_size=3))
        model.add(layers.Dense(2))
        with self.assertRaisesRegex(ValueError, "already been configured"):
            model.build((3, 4))
        # But this works
        model.build((3, 2))
    def test_shape_inference_failure(self):
        # A layer without `compute_output_spec` prevents graph building;
        # the model must still run eagerly.
        class DynamicLayer(layers.Layer):
            def call(self, inputs):
                return inputs + 1.0
            def compute_output_spec(self, *args, **kwargs):
                raise NotImplementedError
        model = Sequential([DynamicLayer()])
        x = np.random.random((3, 2))
        y = model(x)
        self.assertAllClose(y, x + 1)
        model.summary()
    def test_serialization(self):
        # Unbuilt deferred
        model = Sequential(name="seq")
        model.add(layers.Dense(4))
        model.add(layers.Dense(5))
        revived = self.run_class_serialization_test(model)
        self.assertLen(revived.layers, 2)
        # Built deferred
        model.build((2, 3))
        revived = self.run_class_serialization_test(model)
        self.assertLen(revived.layers, 2)
        # Regular
        model = Sequential(name="seq")
        model.add(Input(shape=(2,), batch_size=3))
        model.add(layers.Dense(4))
        model.add(layers.Dense(5))
        model.add(layers.Dense(6))
        revived = self.run_class_serialization_test(model)
        self.assertLen(revived.layers, 3)
        # Weird
        class DictLayer(layers.Layer):
            def call(self, inputs):
                assert isinstance(inputs, dict)
                return inputs
        model = Sequential([DictLayer()])
        revived = self.run_class_serialization_test(
            model, custom_objects={"DictLayer": DictLayer}
        )
        self.assertLen(revived.layers, 1)
    def test_serialization_with_lambda_layer(self):
        # https://github.com/keras-team/keras/issues/20074
        inputs = np.random.random(size=(1, 10, 4)).astype("float32")
        CONV_WIDTH = 3
        model = Sequential([layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :])])
        outputs = model(inputs)
        temp = self.get_temp_dir()
        save_path = f"{temp}/model.keras"
        model.save(save_path)
        # Lambda deserialization requires safe_mode=False.
        revived = saving.load_model(save_path, safe_mode=False)
        revived_outputs = revived(inputs)
        self.assertLen(revived.layers, 1)
        self.assertAllClose(revived_outputs, outputs)
    def test_functional_properties(self):
        model = Sequential(name="seq")
        inputs = Input(shape=(2,))
        model.add(inputs)
        model.add(layers.Dense(4))
        self.assertEqual(model.inputs, [inputs])
        self.assertEqual(model.outputs, [model.layers[-1].output])
        self.assertEqual(model.input_shape, (None, 2))
        self.assertEqual(model.output_shape, (None, 4))
    def test_pickleable(self):
        model = Sequential(name="seq")
        model.add(layers.Dense(4))
        result = pickle.loads(pickle.dumps(model))
        assert len(result.layers) == 1
    def test_bad_layer(self):
        model = Sequential(name="seq")
        with self.assertRaisesRegex(ValueError, "Only instances of"):
            model.add({})
        model = Sequential(name="seq")
        # `call` with two required positional args is rejected at build time.
        class BadLayer(layers.Layer):
            def call(self, inputs, training):
                return inputs
        model.add(BadLayer())
        with self.assertRaisesRegex(
            ValueError, "can only have a single positional"
        ):
            model.build((None, 2))
    def test_compute_output_shape(self):
        layer = Sequential([layers.Dense(4), layers.Dense(8)])
        output_shape = layer.compute_output_shape((1, 2))
        self.assertEqual(output_shape, (1, 8))
    def test_hasattr(self):
        # Unbuilt models raise AttributeError from these properties, so
        # `hasattr` reports False until an Input is provided.
        model = Sequential()
        self.assertFalse(hasattr(model, "input_shape"))
        self.assertFalse(hasattr(model, "output_shape"))
        self.assertFalse(hasattr(model, "inputs"))
        self.assertFalse(hasattr(model, "outputs"))
        model = Sequential([layers.Input((4,)), layers.Dense(8)])
        self.assertTrue(hasattr(model, "input_shape"))
        self.assertTrue(hasattr(model, "output_shape"))
        self.assertTrue(hasattr(model, "inputs"))
        self.assertTrue(hasattr(model, "outputs"))
    def test_layers_setter(self):
        model = Sequential()
        with self.assertRaisesRegex(
            AttributeError, r"Use `add\(\)` and `pop\(\)`"
        ):
            model.layers = [layers.Dense(4)]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/efficientnet.py | keras/src/applications/efficientnet.py | import copy
import math
from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
BASE_WEIGHTS_PATH = "https://storage.googleapis.com/keras-applications/"
WEIGHTS_HASHES = {
"b0": (
"902e53a9f72be733fc0bcb005b3ebbac",
"50bc09e76180e00e4465e1a485ddc09d",
),
"b1": (
"1d254153d4ab51201f1646940f018540",
"74c4e6b3e1f6a1eea24c589628592432",
),
"b2": (
"b15cce36ff4dcbd00b6dd88e7857a6ad",
"111f8e2ac8aa800a7a99e3239f7bfb39",
),
"b3": (
"ffd1fdc53d0ce67064dc6a9c7960ede0",
"af6d107764bb5b1abb91932881670226",
),
"b4": (
"18c95ad55216b8f92d7e70b3a046e2fc",
"ebc24e6d6c33eaebbd558eafbeedf1ba",
),
"b5": (
"ace28f2a6363774853a83a0b21b9421a",
"38879255a25d3c92d5e44e04ae6cec6f",
),
"b6": (
"165f6e37dce68623721b423839de8be5",
"9ecce42647a20130c1f39a5d4cb75743",
),
"b7": (
"8c03f828fec3ef71311cd463b6759d99",
"cbcfe4450ddf6f3ad90b1b398090fe4a",
),
}
DEFAULT_BLOCKS_ARGS = [
{
"kernel_size": 3,
"repeats": 1,
"filters_in": 32,
"filters_out": 16,
"expand_ratio": 1,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 2,
"filters_in": 16,
"filters_out": 24,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 2,
"filters_in": 24,
"filters_out": 40,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 3,
"filters_in": 40,
"filters_out": 80,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 3,
"filters_in": 80,
"filters_out": 112,
"expand_ratio": 6,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 4,
"filters_in": 112,
"filters_out": 192,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 1,
"filters_in": 192,
"filters_out": 320,
"expand_ratio": 6,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
]
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
DENSE_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 1.0 / 3.0,
"mode": "fan_out",
"distribution": "uniform",
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For EfficientNet, input preprocessing is included as part of the model
(as a `Rescaling` layer), and thus
`keras.applications.efficientnet.preprocess_input` is actually a
pass-through function. EfficientNet models expect their inputs to be float
tensors of pixels with values in the `[0-255]` range.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to `None`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. 1000 is how many
ImageNet classes there are. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `'softmax'`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A model instance.
"""
IMAGENET_STDDEV_RGB = [0.229, 0.224, 0.225]
def EfficientNet(
    width_coefficient,
    depth_coefficient,
    default_size,
    dropout_rate=0.2,
    drop_connect_rate=0.2,
    depth_divisor=8,
    activation="swish",
    blocks_args="default",
    name="efficientnet",
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    weights_name=None,
):
    """Instantiates the EfficientNet architecture.
    Args:
        width_coefficient: float, scaling coefficient for network width.
        depth_coefficient: float, scaling coefficient for network depth.
        default_size: integer, default input image size.
        dropout_rate: float, dropout rate before final classifier layer.
        drop_connect_rate: float, dropout rate at skip connections.
        depth_divisor: integer, a unit of network width.
        activation: activation function.
        blocks_args: list of dicts, parameters to construct block modules.
        name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False.
            It should have exactly 3 inputs channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to use
            on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top" layer.
        weights_name: string key into `WEIGHTS_HASHES` ("b0".."b7");
            required when `weights="imagenet"` (a missing key would raise
            KeyError at weight-loading time).
    Returns:
        A model instance.
    """
    if blocks_args == "default":
        blocks_args = DEFAULT_BLOCKS_ARGS
    # Validate `weights` early: known keyword, None, or an existing path.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top`'
            " as true, `classes` should be 1000"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Channel axis for BatchNormalization depends on the data format.
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    def round_filters(filters, divisor=depth_divisor):
        """Round number of filters based on depth multiplier."""
        filters *= width_coefficient
        new_filters = max(
            divisor, int(filters + divisor / 2) // divisor * divisor
        )
        # Make sure that round down does not go down by more than 10%.
        if new_filters < 0.9 * filters:
            new_filters += divisor
        return int(new_filters)
    def round_repeats(repeats):
        """Round number of repeats based on depth multiplier."""
        return int(math.ceil(depth_coefficient * repeats))
    # Build stem
    x = img_input
    x = layers.Rescaling(1.0 / 255.0)(x)
    x = layers.Normalization(axis=bn_axis)(x)
    if weights == "imagenet":
        # Note that the normalization layer uses square value of STDDEV as the
        # variance for the layer: result = (input - mean) / sqrt(var)
        # However, the original implementation uses (input - mean) / var to
        # normalize the input, we need to divide another sqrt(var) to match the
        # original implementation.
        # See https://github.com/tensorflow/tensorflow/issues/49930 for more
        # details
        x = layers.Rescaling(
            [1.0 / math.sqrt(stddev) for stddev in IMAGENET_STDDEV_RGB]
        )(x)
    x = layers.ZeroPadding2D(
        padding=imagenet_utils.correct_pad(x, 3), name="stem_conv_pad"
    )(x)
    x = layers.Conv2D(
        round_filters(32),
        3,
        strides=2,
        padding="valid",
        use_bias=False,
        kernel_initializer=CONV_KERNEL_INITIALIZER,
        name="stem_conv",
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, name="stem_bn")(x)
    x = layers.Activation(activation, name="stem_activation")(x)
    # Build blocks
    blocks_args = copy.deepcopy(blocks_args)
    b = 0
    # Total block count after depth scaling, used to ramp the per-block
    # drop-connect rate linearly from 0 to `drop_connect_rate`.
    blocks = float(sum(round_repeats(args["repeats"]) for args in blocks_args))
    for i, args in enumerate(blocks_args):
        assert args["repeats"] > 0
        # Update block input and output filters based on depth multiplier.
        args["filters_in"] = round_filters(args["filters_in"])
        args["filters_out"] = round_filters(args["filters_out"])
        for j in range(round_repeats(args.pop("repeats"))):
            # The first block needs to take care of stride and filter size
            # increase.
            if j > 0:
                args["strides"] = 1
                args["filters_in"] = args["filters_out"]
            # Block names follow the pretrained checkpoints: "block1a_",
            # "block1b_", ... (chr(j + 97) yields 'a', 'b', ...).
            x = block(
                x,
                activation,
                drop_connect_rate * b / blocks,
                name=f"block{i + 1}{chr(j + 97)}_",
                **args,
            )
            b += 1
    # Build top
    x = layers.Conv2D(
        round_filters(1280),
        1,
        padding="same",
        use_bias=False,
        kernel_initializer=CONV_KERNEL_INITIALIZER,
        name="top_conv",
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, name="top_bn")(x)
    x = layers.Activation(activation, name="top_activation")(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate, name="top_dropout")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes,
            activation=classifier_activation,
            kernel_initializer=DENSE_KERNEL_INITIALIZER,
            name="predictions",
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Functional(inputs, x, name=name)
    # Load weights.
    if weights == "imagenet":
        if include_top:
            file_suffix = ".h5"
            file_hash = WEIGHTS_HASHES[weights_name][0]
        else:
            file_suffix = "_notop.h5"
            file_hash = WEIGHTS_HASHES[weights_name][1]
        file_name = name + file_suffix
        weights_path = file_utils.get_file(
            file_name,
            BASE_WEIGHTS_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        # `weights` is a local file path (validated above).
        model.load_weights(weights)
    return model
def block(
    inputs,
    activation="swish",
    drop_rate=0.0,
    name="",
    filters_in=32,
    filters_out=16,
    kernel_size=3,
    strides=1,
    expand_ratio=1,
    se_ratio=0.0,
    id_skip=True,
):
    """An inverted residual block (MBConv) as used by EfficientNet.

    Args:
        inputs: input tensor.
        activation: activation function.
        drop_rate: float between 0 and 1, fraction of the input units to drop.
        name: string, block label (used as a prefix for layer names).
        filters_in: integer, the number of input filters.
        filters_out: integer, the number of output filters.
        kernel_size: integer, the dimension of the convolution window.
        strides: integer, the stride of the convolution.
        expand_ratio: integer, scaling coefficient for the input filters.
        se_ratio: float between 0 and 1, fraction to squeeze the input filters.
        id_skip: boolean, whether to add the identity shortcut when shapes
            allow it.

    Returns:
        output tensor for the block.
    """
    # Channels axis depends on the backend's image data format.
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    # Expansion phase: a pointwise conv widens the channel count by
    # `expand_ratio`; skipped entirely when the ratio is 1.
    filters = filters_in * expand_ratio
    if expand_ratio != 1:
        x = layers.Conv2D(
            filters,
            1,
            padding="same",
            use_bias=False,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            name=f"{name}expand_conv",
        )(inputs)
        x = layers.BatchNormalization(axis=bn_axis, name=f"{name}expand_bn")(x)
        x = layers.Activation(activation, name=f"{name}expand_activation")(x)
    else:
        x = inputs

    # Depthwise Convolution. For stride-2 blocks, pad explicitly and use
    # "valid" padding so downsampling matches TF's "same" behavior.
    if strides == 2:
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, kernel_size),
            name=f"{name}dwconv_pad",
        )(x)
        conv_pad = "valid"
    else:
        conv_pad = "same"
    x = layers.DepthwiseConv2D(
        kernel_size,
        strides=strides,
        padding=conv_pad,
        use_bias=False,
        depthwise_initializer=CONV_KERNEL_INITIALIZER,
        name=f"{name}dwconv",
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f"{name}bn")(x)
    x = layers.Activation(activation, name=f"{name}activation")(x)

    # Squeeze and Excitation phase: global-pool to a per-channel
    # descriptor, bottleneck to `filters_se` channels, then gate `x`
    # channel-wise with a sigmoid.
    if 0 < se_ratio <= 1:
        filters_se = max(1, int(filters_in * se_ratio))
        se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x)
        if bn_axis == 1:
            se_shape = (filters, 1, 1)
        else:
            se_shape = (1, 1, filters)
        se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se)
        se = layers.Conv2D(
            filters_se,
            1,
            padding="same",
            activation=activation,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            name=f"{name}se_reduce",
        )(se)
        se = layers.Conv2D(
            filters,
            1,
            padding="same",
            activation="sigmoid",
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            name=f"{name}se_expand",
        )(se)
        x = layers.multiply([x, se], name=f"{name}se_excite")

    # Output phase: linear pointwise projection down to `filters_out`
    # (no activation after the projection BN).
    x = layers.Conv2D(
        filters_out,
        1,
        padding="same",
        use_bias=False,
        kernel_initializer=CONV_KERNEL_INITIALIZER,
        name=f"{name}project_conv",
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f"{name}project_bn")(x)

    # Identity shortcut only when spatial and channel dims are unchanged;
    # `drop_rate` implements stochastic depth via per-sample Dropout.
    if id_skip and strides == 1 and filters_in == filters_out:
        if drop_rate > 0:
            x = layers.Dropout(
                drop_rate, noise_shape=(None, 1, 1, 1), name=f"{name}drop"
            )(x)
        x = layers.add([x, inputs], name=f"{name}add")
    return x
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB0",
        "keras.applications.EfficientNetB0",
    ]
)
def EfficientNetB0(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb0",
):
    # EfficientNet-B0 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.0
    depth_coefficient = 1.0
    default_resolution = 224
    dropout = 0.2
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b0",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB1",
        "keras.applications.EfficientNetB1",
    ]
)
def EfficientNetB1(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb1",
):
    # EfficientNet-B1 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.0
    depth_coefficient = 1.1
    default_resolution = 240
    dropout = 0.2
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b1",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB2",
        "keras.applications.EfficientNetB2",
    ]
)
def EfficientNetB2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb2",
):
    # EfficientNet-B2 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.1
    depth_coefficient = 1.2
    default_resolution = 260
    dropout = 0.3
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b2",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB3",
        "keras.applications.EfficientNetB3",
    ]
)
def EfficientNetB3(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb3",
):
    # EfficientNet-B3 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.2
    depth_coefficient = 1.4
    default_resolution = 300
    dropout = 0.3
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b3",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB4",
        "keras.applications.EfficientNetB4",
    ]
)
def EfficientNetB4(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb4",
):
    # EfficientNet-B4 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.4
    depth_coefficient = 1.8
    default_resolution = 380
    dropout = 0.4
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b4",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB5",
        "keras.applications.EfficientNetB5",
    ]
)
def EfficientNetB5(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb5",
):
    # EfficientNet-B5 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.6
    depth_coefficient = 2.2
    default_resolution = 456
    dropout = 0.4
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b5",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB6",
        "keras.applications.EfficientNetB6",
    ]
)
def EfficientNetB6(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb6",
):
    # EfficientNet-B6 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 1.8
    depth_coefficient = 2.6
    default_resolution = 528
    dropout = 0.5
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b6",
    )
@keras_export(
    [
        "keras.applications.efficientnet.EfficientNetB7",
        "keras.applications.EfficientNetB7",
    ]
)
def EfficientNetB7(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="efficientnetb7",
):
    # EfficientNet-B7 compound-scaling configuration:
    # width multiplier, depth multiplier, default resolution, dropout rate.
    width_coefficient = 2.0
    depth_coefficient = 3.1
    default_resolution = 600
    dropout = 0.5
    return EfficientNet(
        width_coefficient,
        depth_coefficient,
        default_resolution,
        dropout,
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        weights_name="b7",
    )
# Attach the shared usage docstring (parameterized by model name) to each
# public variant; this replaces whatever docstring the function defined.
EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB0")
EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB1")
EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB2")
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB3")
EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB4")
EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB5")
EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB6")
EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB7")
@keras_export("keras.applications.efficientnet.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder kept only for backward compatibility.

    EfficientNet models carry their preprocessing inside the model itself,
    so there is nothing left for this function to do. It exists solely so
    the EfficientNet API surface matches that of the other
    `keras.applications` modules.

    Args:
        x: A floating point `numpy.array` or a tensor.
        data_format: Optional data format of the image tensor/array. `None`
            means the global setting `keras.backend.image_data_format()`
            is used (unless you changed it, it uses `"channels_last"`).
            Defaults to `None`.

    Returns:
        Unchanged `numpy.array` or tensor.
    """
    return x
@keras_export("keras.applications.efficientnet.decode_predictions")
def decode_predictions(preds, top=5):
    # Thin wrapper; the shared ImageNet decoding logic (and the docstring
    # copied onto this function below) lives in `imagenet_utils`.
    return imagenet_utils.decode_predictions(preds, top=top)


decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/mobilenet_v3.py | keras/src/applications/mobilenet_v3.py | import warnings
from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Base URL for the pretrained MobileNetV3 weight files referenced below.
BASE_WEIGHT_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/"
)

# Maps a "{type}{_minimalistic?}_224_{alpha}_float" model key to a pair of
# file hashes: index 0 is for the full (with-top) weights file, index 1 for
# the corresponding "no top" weights file.
WEIGHTS_HASHES = {
    "large_224_0.75_float": (
        "765b44a33ad4005b3ac83185abf1d0eb",
        "40af19a13ebea4e2ee0c676887f69a2e",
    ),
    "large_224_1.0_float": (
        "59e551e166be033d707958cf9e29a6a7",
        "07fb09a5933dd0c8eaafa16978110389",
    ),
    "large_minimalistic_224_1.0_float": (
        "675e7b876c45c57e9e63e6d90a36599c",
        "ec5221f64a2f6d1ef965a614bdae7973",
    ),
    "small_224_0.75_float": (
        "cb65d4e5be93758266aa0a7f2c6708b7",
        "ebdb5cc8e0b497cd13a7c275d475c819",
    ),
    "small_224_1.0_float": (
        "8768d4c2e7dee89b9d02b2d03d65d862",
        "d3e8ec802a04aa4fc771ee12a9a9b836",
    ),
    "small_minimalistic_224_1.0_float": (
        "99cd97fb2fcdad2bf028eb838de69e37",
        "cde8136e733e811080d9fcd8a252f7e4",
    ),
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Searching for MobileNetV3](
https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
The following table describes the performance of MobileNets v3:
------------------------------------------------------------------------
MACs stands for Multiply Adds
|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
|---|---|---|---|---|
| mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
| mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
| mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
| mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
| mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
| mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNetV3, by default input preprocessing is included as a part of the
model (as a `Rescaling` layer), and thus
`keras.applications.mobilenet_v3.preprocess_input` is actually a
pass-through function. In this use case, MobileNetV3 models expect their
inputs to be float tensors of pixels with values in the `[0-255]` range.
At the same time, preprocessing as a part of the model (i.e. `Rescaling`
layer) can be disabled by setting `include_preprocessing` argument to `False`.
With preprocessing disabled MobileNetV3 models expect their inputs to be float
tensors of pixels with values in the `[-1, 1]` range.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
`(224, 224, 3)`.
It should have exactly 3 input channels.
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
When `weights` is `imagenet`, `alpha` can be one of `0.75` or `1.0`
for non-minimalistic models, and `1.0` for minimalistic models.
- If `alpha < 1.0`, proportionally decreases the number
of filters in each layer.
- If `alpha > 1.0`, proportionally increases the number
of filters in each layer.
- If `alpha == 1`, default number of filters from the paper
are used at each layer.
minimalistic: In addition to large and small models, this module also
contains so-called minimalistic models. These models have the same
per-layer dimensions as MobileNetV3; however, they don't
utilize any of the advanced blocks (squeeze-and-excite units,
hard-swish, and 5x5 convolutions).
While these models are less efficient on CPU, they
are much more performant on GPU/DSP.
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Integer, optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
dropout_rate: fraction of the input units to drop on the last layer.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network. Defaults to `True`.
name: String, the name of the model.
Call arguments:
inputs: A floating point `numpy.array` or backend-native tensor,
4D with 3 color channels, with values in the range `[0, 255]`
if `include_preprocessing` is `True` and in the range `[-1, 1]`
otherwise.
Returns:
A model instance.
"""
def MobileNetV3(
    stack_fn,
    last_point_ch,
    input_shape=None,
    alpha=1.0,
    model_type="large",
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
    name=None,
):
    """Shared builder behind `MobileNetV3Small` / `MobileNetV3Large`.

    Args:
        stack_fn: callable `(x, kernel, activation, se_ratio) -> tensor`
            that appends the variant-specific stack of inverted residual
            blocks to `x`.
        last_point_ch: integer, channel count of the last pointwise conv in
            the classification head (scaled up when `alpha > 1`).
        (All remaining arguments are documented in `BASE_DOCSTRING`.)

    Returns:
        A `Functional` model instance.

    Raises:
        ValueError: if `weights`/`classes`/`alpha` are inconsistent, if
            `input_shape` and `input_tensor` disagree, or if the input
            resolution is smaller than 32x32.
    """
    # `weights` must be None, "imagenet", or a path to a weights file.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. "
            f"Received weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top` '
            "as true, `classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            # `input_tensor` may be a backend-native tensor whose Keras
            # history lives on a predecessor; try resolving that first.
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    operation_utils.get_source_inputs(input_tensor)
                )
            except ValueError:
                raise ValueError(
                    "input_tensor: ",
                    input_tensor,
                    "is not type input_tensor. "
                    f"Received type(input_tensor)={type(input_tensor)}",
                )
        if is_input_t_tensor:
            if backend.image_data_format() == "channels_first":
                if input_tensor.shape[1] != input_shape[1]:
                    raise ValueError(
                        "When backend.image_data_format()=channels_first, "
                        "input_shape[1] must equal "
                        "input_tensor.shape[1]. Received "
                        f"input_shape={input_shape}, "
                        "input_tensor.shape="
                        f"{input_tensor.shape}"
                    )
            else:
                if input_tensor.shape[2] != input_shape[1]:
                    raise ValueError(
                        "input_shape[1] must equal "
                        "input_tensor.shape[2]. Received "
                        f"input_shape={input_shape}, "
                        "input_tensor.shape="
                        f"{input_tensor.shape}"
                    )
        else:
            raise ValueError(
                "input_tensor specified: ",
                input_tensor,
                "is not a keras tensor",
            )

    # If input_shape is None, infer shape from input_tensor
    if input_shape is None and input_tensor is not None:
        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError(
                "input_tensor: ",
                input_tensor,
                "is type: ",
                type(input_tensor),
                "which is not a valid type",
            )

        if backend.is_keras_tensor(input_tensor):
            if backend.image_data_format() == "channels_first":
                rows = input_tensor.shape[2]
                cols = input_tensor.shape[3]
                input_shape = (3, cols, rows)
            else:
                rows = input_tensor.shape[1]
                cols = input_tensor.shape[2]
                input_shape = (cols, rows, 3)

    # If input_shape is None and input_tensor is None using standard shape
    if input_shape is None and input_tensor is None:
        if backend.image_data_format() == "channels_last":
            input_shape = (None, None, 3)
        else:
            input_shape = (3, None, None)

    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    if rows and cols and (rows < 32 or cols < 32):
        raise ValueError(
            "Input size must be at least 32x32; Received `input_shape="
            f"{input_shape}`"
        )
    if weights == "imagenet":
        # Pretrained weights exist only for a small set of `alpha` values.
        if (
            not minimalistic
            and alpha not in [0.75, 1.0]
            or minimalistic
            and alpha != 1.0
        ):
            raise ValueError(
                "If imagenet weights are being loaded, "
                "alpha can be one of `0.75`, `1.0` for non minimalistic "
                "or `1.0` for minimalistic only."
            )

        if rows != cols or rows != 224:
            warnings.warn(
                "`input_shape` is undefined or non-square, "
                "or `rows` is not 224. "
                "Weights for input shape (224, 224) will be "
                "loaded as the default.",
                stacklevel=2,
            )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    # Minimalistic variants drop the advanced blocks: 3x3 kernels only,
    # plain ReLU, and no squeeze-and-excite.
    if minimalistic:
        kernel = 3
        activation = relu
        se_ratio = None
    else:
        kernel = 5
        activation = hard_swish
        se_ratio = 0.25

    x = img_input
    if include_preprocessing:
        # Map [0, 255] pixel values to the [-1, 1] range.
        x = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(x)
    # Stem: strided 3x3 conv to 16 channels.
    x = layers.Conv2D(
        16,
        kernel_size=3,
        strides=(2, 2),
        padding="same",
        use_bias=False,
        name="conv",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="conv_bn"
    )(x)
    x = activation(x)

    # Variant-specific body of inverted residual blocks.
    x = stack_fn(x, kernel, activation, se_ratio)

    last_conv_ch = _depth(x.shape[channel_axis] * 6)

    # if the width multiplier is greater than 1 we
    # increase the number of output channels
    if alpha > 1.0:
        last_point_ch = _depth(last_point_ch * alpha)
    x = layers.Conv2D(
        last_conv_ch,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name="conv_1",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="conv_1_bn"
    )(x)
    x = activation(x)

    if include_top:
        # Classification head: global pool, two 1x1 convs acting as dense
        # layers, then flatten to (batch, classes).
        x = layers.GlobalAveragePooling2D(keepdims=True)(x)
        x = layers.Conv2D(
            last_point_ch,
            kernel_size=1,
            padding="same",
            use_bias=True,
            name="conv_2",
        )(x)
        x = activation(x)

        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate)(x)
        x = layers.Conv2D(
            classes, kernel_size=1, padding="same", name="logits"
        )(x)
        x = layers.Flatten()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Activation(
            activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        model_name = "{}{}_224_{}_float".format(
            model_type, "_minimalistic" if minimalistic else "", str(alpha)
        )
        if include_top:
            file_name = f"weights_mobilenet_v3_{model_name}.h5"
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = f"weights_mobilenet_v3_{model_name}_no_top_v2.h5"
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = file_utils.get_file(
            file_name,
            BASE_WEIGHT_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
@keras_export("keras.applications.MobileNetV3Small")
def MobileNetV3Small(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
    name="MobileNetV3Small",
):
    """Build MobileNetV3-Small (full usage docs attached below)."""

    def stack_fn(x, kernel, activation, se_ratio):
        # Scale channel counts by the width multiplier, rounded to
        # hardware-friendly multiples of 8.
        def scaled(d):
            return _depth(d * alpha)

        x = _inverted_res_block(x, 1, scaled(16), 3, 2, se_ratio, relu, 0)
        x = _inverted_res_block(x, 72.0 / 16, scaled(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 88.0 / 24, scaled(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(
            x, 4, scaled(40), kernel, 2, se_ratio, activation, 3
        )
        x = _inverted_res_block(
            x, 6, scaled(40), kernel, 1, se_ratio, activation, 4
        )
        x = _inverted_res_block(
            x, 6, scaled(40), kernel, 1, se_ratio, activation, 5
        )
        x = _inverted_res_block(
            x, 3, scaled(48), kernel, 1, se_ratio, activation, 6
        )
        x = _inverted_res_block(
            x, 3, scaled(48), kernel, 1, se_ratio, activation, 7
        )
        x = _inverted_res_block(
            x, 6, scaled(96), kernel, 2, se_ratio, activation, 8
        )
        x = _inverted_res_block(
            x, 6, scaled(96), kernel, 1, se_ratio, activation, 9
        )
        x = _inverted_res_block(
            x, 6, scaled(96), kernel, 1, se_ratio, activation, 10
        )
        return x

    return MobileNetV3(
        stack_fn=stack_fn,
        last_point_ch=1024,
        input_shape=input_shape,
        alpha=alpha,
        model_type="small",
        minimalistic=minimalistic,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        classes=classes,
        pooling=pooling,
        dropout_rate=dropout_rate,
        classifier_activation=classifier_activation,
        include_preprocessing=include_preprocessing,
        name=name,
    )
@keras_export("keras.applications.MobileNetV3Large")
def MobileNetV3Large(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
    name="MobileNetV3Large",
):
    """Build MobileNetV3-Large (full usage docs attached below)."""

    def stack_fn(x, kernel, activation, se_ratio):
        # Scale channel counts by the width multiplier, rounded to
        # hardware-friendly multiples of 8.
        def scaled(d):
            return _depth(d * alpha)

        x = _inverted_res_block(x, 1, scaled(16), 3, 1, None, relu, 0)
        x = _inverted_res_block(x, 4, scaled(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 3, scaled(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(x, 3, scaled(40), kernel, 2, se_ratio, relu, 3)
        x = _inverted_res_block(x, 3, scaled(40), kernel, 1, se_ratio, relu, 4)
        x = _inverted_res_block(x, 3, scaled(40), kernel, 1, se_ratio, relu, 5)
        x = _inverted_res_block(x, 6, scaled(80), 3, 2, None, activation, 6)
        x = _inverted_res_block(x, 2.5, scaled(80), 3, 1, None, activation, 7)
        x = _inverted_res_block(x, 2.3, scaled(80), 3, 1, None, activation, 8)
        x = _inverted_res_block(x, 2.3, scaled(80), 3, 1, None, activation, 9)
        x = _inverted_res_block(
            x, 6, scaled(112), 3, 1, se_ratio, activation, 10
        )
        x = _inverted_res_block(
            x, 6, scaled(112), 3, 1, se_ratio, activation, 11
        )
        x = _inverted_res_block(
            x, 6, scaled(160), kernel, 2, se_ratio, activation, 12
        )
        x = _inverted_res_block(
            x, 6, scaled(160), kernel, 1, se_ratio, activation, 13
        )
        x = _inverted_res_block(
            x, 6, scaled(160), kernel, 1, se_ratio, activation, 14
        )
        return x

    return MobileNetV3(
        stack_fn=stack_fn,
        last_point_ch=1280,
        input_shape=input_shape,
        alpha=alpha,
        model_type="large",
        minimalistic=minimalistic,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        classes=classes,
        pooling=pooling,
        dropout_rate=dropout_rate,
        classifier_activation=classifier_activation,
        include_preprocessing=include_preprocessing,
        name=name,
    )
# Attach the shared usage docstring (parameterized by model name) to the
# two public variants.
MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Small")
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Large")
def relu(x):
    """Apply a standard (unbounded) ReLU to `x` via a Keras layer."""
    activation_layer = layers.ReLU()
    return activation_layer(x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: `relu6(x + 3) / 6`."""
    clipped = layers.ReLU(6.0)(x + 3.0)
    return clipped * (1.0 / 6.0)
def hard_swish(x):
    """Apply the hard-swish activation to `x` as a Keras layer."""
    activation_layer = layers.Activation("hard_swish")
    return activation_layer(x)
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _se_block(inputs, filters, se_ratio, prefix):
    """Squeeze-and-excitation block.

    Gates `inputs` channel-wise: a globally pooled descriptor is squeezed
    to `_depth(filters * se_ratio)` channels, re-expanded to `filters`,
    passed through a hard sigmoid, and multiplied back onto `inputs`.
    """
    squeezed = layers.GlobalAveragePooling2D(
        keepdims=True, name=f"{prefix}squeeze_excite_avg_pool"
    )(inputs)
    squeezed = layers.Conv2D(
        _depth(filters * se_ratio),
        kernel_size=1,
        padding="same",
        name=f"{prefix}squeeze_excite_conv",
    )(squeezed)
    squeezed = layers.ReLU(name=f"{prefix}squeeze_excite_relu")(squeezed)
    squeezed = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        name=f"{prefix}squeeze_excite_conv_1",
    )(squeezed)
    gate = hard_sigmoid(squeezed)
    return layers.Multiply(name=f"{prefix}squeeze_excite_mul")([inputs, gate])
def _inverted_res_block(
    x, expansion, filters, kernel_size, stride, se_ratio, activation, block_id
):
    """MobileNetV3 inverted residual block (expand -> depthwise -> project).

    Args:
        x: input tensor.
        expansion: float, expansion factor applied to the input channels.
        filters: integer, number of output channels.
        kernel_size: integer, depthwise conv kernel size.
        stride: integer, depthwise conv stride.
        se_ratio: float or None; when truthy, inserts a squeeze-and-excite
            block with this squeeze ratio.
        activation: callable applied after the expand and depthwise convs.
        block_id: integer block index; block 0 skips the expansion conv.

    Returns:
        Output tensor of the block.
    """
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    shortcut = x
    prefix = "expanded_conv_"
    infilters = x.shape[channel_axis]
    if block_id:
        # Expand
        prefix = f"expanded_conv_{block_id}_"
        x = layers.Conv2D(
            _depth(infilters * expansion),
            kernel_size=1,
            padding="same",
            use_bias=False,
            name=f"{prefix}expand",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            epsilon=1e-3,
            momentum=0.999,
            name=f"{prefix}expand_bn",
        )(x)
        x = activation(x)

    if stride == 2:
        # Explicit padding + "valid" depthwise conv so downsampling matches
        # TF's "same" behavior.
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, kernel_size),
            name=f"{prefix}depthwise_pad",
        )(x)
    x = layers.DepthwiseConv2D(
        kernel_size,
        strides=stride,
        padding="same" if stride == 1 else "valid",
        use_bias=False,
        name=f"{prefix}depthwise",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=f"{prefix}depthwise_bn",
    )(x)
    x = activation(x)

    if se_ratio:
        x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)

    # Linear projection back down to `filters` channels (no activation
    # after the projection BN).
    x = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name=f"{prefix}project",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=f"{prefix}project_bn",
    )(x)

    # Residual connection only when spatial and channel dims are unchanged.
    if stride == 1 and infilters == filters:
        x = layers.Add(name=f"{prefix}add")([shortcut, x])
    return x
@keras_export("keras.applications.mobilenet_v3.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder kept only for backward compatibility.

    MobileNetV3 models perform their own input preprocessing (a `Rescaling`
    layer inside the model), so there is nothing left for this function to
    do. It exists solely so this module's API surface matches the other
    `keras.applications` modules.

    Args:
        x: A floating point `numpy.array` or a tensor.
        data_format: Optional data format of the image tensor/array.
            `None` means the global setting
            `keras.config.image_data_format()` is used
            (unless you changed it, it uses `"channels_last"`).
            Defaults to `None`.

    Returns:
        Unchanged `numpy.array` or tensor.
    """
    return x
@keras_export("keras.applications.mobilenet_v3.decode_predictions")
def decode_predictions(preds, top=5):
    # Thin wrapper; the shared ImageNet decoding logic (and the docstring
    # copied onto this function below) lives in `imagenet_utils`.
    return imagenet_utils.decode_predictions(preds, top=top)


decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/mobilenet_v2.py | keras/src/applications/mobilenet_v2.py | import warnings
from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Base URL for the pretrained MobileNetV2 weight files.
BASE_WEIGHT_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/"
)
@keras_export(
    [
        "keras.applications.mobilenet_v2.MobileNetV2",
        "keras.applications.MobileNetV2",
    ]
)
def MobileNetV2(
    input_shape=None,
    alpha=1.0,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name=None,
):
    """Instantiates the MobileNetV2 architecture.
    MobileNetV2 is very similar to the original MobileNet,
    except that it uses inverted residual blocks with
    bottlenecking features. It has a drastically lower
    parameter count than the original MobileNet.
    MobileNets support any input size greater
    than 32 x 32, with larger image sizes
    offering better performance.
    Reference:
    - [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
        https://arxiv.org/abs/1801.04381) (CVPR 2018)
    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.
    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).
    Note: each Keras Application expects a specific kind of input preprocessing.
    For MobileNetV2, call
    `keras.applications.mobilenet_v2.preprocess_input`
    on your inputs before passing them to the model.
    `mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1.
    Args:
        input_shape: Optional shape tuple, only to be specified if `include_top`
            is `False` (otherwise the input shape has to be `(224, 224, 3)`
            (with `"channels_last"` data format) or `(3, 224, 224)`
            (with `"channels_first"` data format).
            It should have exactly 3 inputs channels, and width and
            height should be no smaller than 32. E.g. `(200, 200, 3)` would
            be one valid value. Defaults to `None`.
            `input_shape` will be ignored if the `input_tensor` is provided.
        alpha: Controls the width of the network. This is known as the width
            multiplier in the MobileNet paper.
            - If `alpha < 1.0`, proportionally decreases the number
                of filters in each layer.
            - If `alpha > 1.0`, proportionally increases the number
                of filters in each layer.
            - If `alpha == 1`, default number of filters from the paper
                are used at each layer. Defaults to `1.0`.
        include_top: Boolean, whether to include the fully-connected layer
            at the top of the network. Defaults to `True`.
        weights: One of `None` (random initialization), `"imagenet"`
            (pre-training on ImageNet), or the path to the weights file
            to be loaded. Defaults to `"imagenet"`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model. `input_tensor` is useful
            for sharing inputs between multiple different networks.
            Defaults to `None`.
        pooling: Optional pooling mode for feature extraction when `include_top`
            is `False`.
            - `None` (default) means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: Optional number of classes to classify images into,
            only to be specified if `include_top` is `True`, and if
            no `weights` argument is specified. Defaults to `1000`.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: String, the name of the model.
    Returns:
        A model instance.
    """
    # --- Argument validation ------------------------------------------------
    # `weights` must be one of the known keywords or an existing file path.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. "
            f"Received `weights={weights}`"
        )
    # The pretrained classifier head was trained on the 1000 ImageNet classes.
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top` '
            f"as true, `classes` should be 1000. Received `classes={classes}`"
        )
    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        # Probe whether `input_tensor` is a Keras tensor; fall back to
        # checking its source inputs before giving up with an error.
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    operation_utils.get_source_inputs(input_tensor)
                )
            except ValueError:
                raise ValueError(
                    f"input_tensor: {input_tensor}"
                    "is not type input_tensor. "
                    f"Received `type(input_tensor)={type(input_tensor)}`"
                )
        if is_input_t_tensor:
            # Cross-check the spatial dimension shared by both arguments.
            if backend.image_data_format() == "channels_first":
                if input_tensor.shape[1] != input_shape[1]:
                    raise ValueError(
                        "input_shape[1] must equal shape(input_tensor)[1] "
                        "when `image_data_format` is `channels_first`; "
                        "Received `input_tensor.shape="
                        f"{input_tensor.shape}`"
                        f", `input_shape={input_shape}`"
                    )
            else:
                if input_tensor.shape[2] != input_shape[1]:
                    raise ValueError(
                        "input_tensor.shape[2] must equal input_shape[1]; "
                        "Received `input_tensor.shape="
                        f"{input_tensor.shape}`, "
                        f"`input_shape={input_shape}`"
                    )
        else:
            raise ValueError(
                "input_tensor is not a Keras tensor; "
                f"Received `input_tensor={input_tensor}`"
            )
    # If input_shape is None, infer shape from input_tensor.
    if input_shape is None and input_tensor is not None:
        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError(
                "input_tensor must be a valid Keras tensor type; "
                f"Received {input_tensor} of type {type(input_tensor)}"
            )
        if input_shape is None and not backend.is_keras_tensor(input_tensor):
            default_size = 224
        elif input_shape is None and backend.is_keras_tensor(input_tensor):
            # Use the tensor's spatial size when it is square and matches one
            # of the sizes that pretrained weights exist for; else fall back
            # to 224.
            if backend.image_data_format() == "channels_first":
                rows = input_tensor.shape[2]
                cols = input_tensor.shape[3]
            else:
                rows = input_tensor.shape[1]
                cols = input_tensor.shape[2]
            if rows == cols and rows in [96, 128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224
    # If input_shape is None and no input_tensor
    elif input_shape is None:
        default_size = 224
    # If input_shape is not None, assume default size.
    else:
        # Same square/pretrained-size rule, applied to the explicit shape.
        if backend.image_data_format() == "channels_first":
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if rows == cols and rows in [96, 128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224
    # Resolve/validate the final input shape for this configuration.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    # Pretrained weights only exist for specific alpha values and square
    # input sizes; otherwise fall back to the 224x224 weights with a warning.
    if weights == "imagenet":
        if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
            raise ValueError(
                "If imagenet weights are being loaded, "
                "alpha must be one of `0.35`, `0.50`, `0.75`, "
                "`1.0`, `1.3` or `1.4` only;"
                f" Received `alpha={alpha}`"
            )
        if rows != cols or rows not in [96, 128, 160, 192, 224]:
            rows = 224
            warnings.warn(
                "`input_shape` is undefined or non-square, "
                "or `rows` is not in [96, 128, 160, 192, 224]. "
                "Weights for input shape (224, 224) will be "
                "loaded as the default.",
                stacklevel=2,
            )
    # Build (or reuse) the model's input tensor.
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    # --- Stem: strided 3x3 convolution --------------------------------------
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = layers.Conv2D(
        first_block_filters,
        kernel_size=3,
        strides=(2, 2),
        padding="same",
        use_bias=False,
        name="Conv1",
    )(img_input)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="bn_Conv1"
    )(x)
    x = layers.ReLU(6.0, name="Conv1_relu")(x)
    # --- Body: 17 inverted residual bottleneck blocks -----------------------
    # Layer names encode block ids and must not change: they are what the
    # pretrained weight files are keyed on.
    x = _inverted_res_block(
        x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0
    )
    x = _inverted_res_block(
        x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1
    )
    x = _inverted_res_block(
        x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2
    )
    x = _inverted_res_block(
        x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3
    )
    x = _inverted_res_block(
        x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4
    )
    x = _inverted_res_block(
        x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5
    )
    x = _inverted_res_block(
        x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6
    )
    x = _inverted_res_block(
        x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7
    )
    x = _inverted_res_block(
        x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8
    )
    x = _inverted_res_block(
        x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9
    )
    x = _inverted_res_block(
        x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10
    )
    x = _inverted_res_block(
        x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11
    )
    x = _inverted_res_block(
        x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12
    )
    x = _inverted_res_block(
        x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13
    )
    x = _inverted_res_block(
        x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14
    )
    x = _inverted_res_block(
        x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15
    )
    x = _inverted_res_block(
        x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16
    )
    # no alpha applied to last conv as stated in the paper:
    # if the width multiplier is greater than 1 we increase the number of output
    # channels.
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280
    x = layers.Conv2D(
        last_block_filters, kernel_size=1, use_bias=False, name="Conv_1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1_bn"
    )(x)
    x = layers.ReLU(6.0, name="out_relu")(x)
    # --- Head: classifier or optional global pooling ------------------------
    if include_top:
        x = layers.GlobalAveragePooling2D()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account any potential predecessors of
    # `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    if name is None:
        name = f"mobilenetv2_{alpha:0.2f}_{rows}"
    model = Functional(inputs, x, name=name)
    # Load weights.
    # File names are keyed on alpha and the resolved input size; the
    # "_no_top" variant omits the classifier head.
    if weights == "imagenet":
        if include_top:
            model_name = (
                "mobilenet_v2_weights_tf_dim_ordering_tf_kernels"
                f"_{float(alpha)}_{rows}.h5"
            )
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = file_utils.get_file(
                model_name, weight_path, cache_subdir="models"
            )
        else:
            model_name = (
                "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
                f"{float(alpha)}_{rows}_no_top.h5"
            )
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = file_utils.get_file(
                model_name, weight_path, cache_subdir="models"
            )
        model.load_weights(weights_path)
    elif weights is not None:
        # User-supplied weights file path.
        model.load_weights(weights)
    return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """Inverted residual block: expand -> depthwise -> linear project.

    Args:
        inputs: Input tensor of the block.
        expansion: Factor applied to the input channel count in the
            expansion stage.
        stride: Stride of the depthwise convolution (1 or 2).
        alpha: Width multiplier applied to `filters`.
        filters: Requested number of output channels (before rounding).
        block_id: Integer id used in layer names. Block 0 has no expansion
            convolution and uses the `expanded_conv_` name prefix.

    Returns:
        Output tensor. A residual connection to `inputs` is added when the
        stride is 1 and the channel count is unchanged.
    """
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    in_channels = inputs.shape[channel_axis]
    # Keep the projected channel count a multiple of 8.
    pointwise_filters = _make_divisible(int(filters * alpha), 8)
    if block_id:
        prefix = f"block_{block_id}_"
        # Expand with a pointwise 1x1 convolution.
        x = layers.Conv2D(
            expansion * in_channels,
            kernel_size=1,
            padding="same",
            use_bias=False,
            activation=None,
            name=f"{prefix}expand",
        )(inputs)
        x = layers.BatchNormalization(
            axis=channel_axis,
            epsilon=1e-3,
            momentum=0.999,
            name=f"{prefix}expand_BN",
        )(x)
        x = layers.ReLU(6.0, name=f"{prefix}expand_relu")(x)
    else:
        # The very first block skips the expansion stage entirely.
        prefix = "expanded_conv_"
        x = inputs
    # Depthwise 3x3 convolution; strided blocks pad explicitly so the
    # convolution itself can use "valid" padding.
    if stride == 2:
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, 3), name=f"{prefix}pad"
        )(x)
    x = layers.DepthwiseConv2D(
        kernel_size=3,
        strides=stride,
        activation=None,
        use_bias=False,
        padding="same" if stride == 1 else "valid",
        name=f"{prefix}depthwise",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=f"{prefix}depthwise_BN",
    )(x)
    x = layers.ReLU(6.0, name=f"{prefix}depthwise_relu")(x)
    # Linear bottleneck: project back down with a 1x1 convolution and no
    # activation after the batch norm.
    x = layers.Conv2D(
        pointwise_filters,
        kernel_size=1,
        padding="same",
        use_bias=False,
        activation=None,
        name=f"{prefix}project",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=f"{prefix}project_BN",
    )(x)
    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=f"{prefix}add")([inputs, x])
    return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@keras_export("keras.applications.mobilenet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
    """Preprocess inputs with the shared "tf" mode (pixels scaled to [-1, 1])."""
    preprocessed = imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )
    return preprocessed
@keras_export("keras.applications.mobilenet_v2.decode_predictions")
def decode_predictions(preds, top=5):
    """Decode model predictions via the shared ImageNet helper."""
    decoded = imagenet_utils.decode_predictions(preds, top=top)
    return decoded


# Attach the canonical docstrings from the shared ImageNet utilities.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/resnet_v2.py | keras/src/applications/resnet_v2.py | from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.applications import resnet
@keras_export(
    [
        "keras.applications.ResNet50V2",
        "keras.applications.resnet_v2.ResNet50V2",
    ]
)
def ResNet50V2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet50v2",
):
    """Instantiates the ResNet50V2 architecture."""

    def stack_fn(x):
        # Stage configuration: (filters, number of blocks, stage name).
        for stage_filters, block_count, stage_name in (
            (64, 3, "conv2"),
            (128, 4, "conv3"),
            (256, 6, "conv4"),
        ):
            x = resnet.stack_residual_blocks_v2(
                x, stage_filters, block_count, name=stage_name
            )
        # The final stage overrides the first-block stride to 1.
        return resnet.stack_residual_blocks_v2(
            x, 512, 3, stride1=1, name="conv5"
        )

    return resnet.ResNet(
        stack_fn,
        True,
        True,
        name=name,
        weights_name="resnet50v2",
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export(
    [
        "keras.applications.ResNet101V2",
        "keras.applications.resnet_v2.ResNet101V2",
    ]
)
def ResNet101V2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet101v2",
):
    """Instantiates the ResNet101V2 architecture."""

    def stack_fn(x):
        # Stage configuration: (filters, number of blocks, stage name).
        for stage_filters, block_count, stage_name in (
            (64, 3, "conv2"),
            (128, 4, "conv3"),
            (256, 23, "conv4"),
        ):
            x = resnet.stack_residual_blocks_v2(
                x, stage_filters, block_count, name=stage_name
            )
        # The final stage overrides the first-block stride to 1.
        return resnet.stack_residual_blocks_v2(
            x, 512, 3, stride1=1, name="conv5"
        )

    return resnet.ResNet(
        stack_fn,
        True,
        True,
        name=name,
        weights_name="resnet101v2",
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export(
    [
        "keras.applications.ResNet152V2",
        "keras.applications.resnet_v2.ResNet152V2",
    ]
)
def ResNet152V2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet152v2",
):
    """Instantiates the ResNet152V2 architecture."""

    def stack_fn(x):
        # Stage configuration: (filters, number of blocks, stage name).
        for stage_filters, block_count, stage_name in (
            (64, 3, "conv2"),
            (128, 8, "conv3"),
            (256, 36, "conv4"),
        ):
            x = resnet.stack_residual_blocks_v2(
                x, stage_filters, block_count, name=stage_name
            )
        # The final stage overrides the first-block stride to 1.
        return resnet.stack_residual_blocks_v2(
            x, 512, 3, stride1=1, name="conv5"
        )

    return resnet.ResNet(
        stack_fn,
        True,
        True,
        name=name,
        weights_name="resnet152v2",
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export("keras.applications.resnet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
    """Preprocess inputs with the shared "tf" mode (pixels scaled to [-1, 1])."""
    preprocessed = imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )
    return preprocessed
@keras_export("keras.applications.resnet_v2.decode_predictions")
def decode_predictions(preds, top=5):
    """Decode model predictions via the shared ImageNet helper."""
    decoded = imagenet_utils.decode_predictions(preds, top=top)
    return decoded


# Attach the canonical docstrings from the shared ImageNet utilities.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Identity Mappings in Deep Residual Networks](
https://arxiv.org/abs/1603.05027) (CVPR 2016)
For image classification use cases, see [this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For ResNet, call `keras.applications.resnet_v2.preprocess_input` on your
inputs before passing them to the model. `resnet_v2.preprocess_input` will
scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet), or the path to the weights
file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
is `False` (otherwise the input shape has to be `(224, 224, 3)`
(with `"channels_last"` data format) or `(3, 224, 224)`
(with `"channels_first"` data format). It should have exactly 3
inputs channels, and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the output
of the last convolutional block, and thus the output of the
model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is `True`, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A Model instance.
"""
setattr(ResNet50V2, "__doc__", ResNet50V2.__doc__ + DOC)
setattr(ResNet101V2, "__doc__", ResNet101V2.__doc__ + DOC)
setattr(ResNet152V2, "__doc__", ResNet152V2.__doc__ + DOC)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/efficientnet_v2.py | keras/src/applications/efficientnet_v2.py | import copy
import math
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Root URL from which pretrained EfficientNetV2 weight files are downloaded.
BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/"  # noqa: E501
# Per-variant weight-file hashes, keyed by the model-name suffix:
# (hash of the with-top weights file, hash of the no-top weights file).
# NOTE(review): presumably passed to the download helper for integrity
# checking — confirm against the weight-loading code (not in view here).
WEIGHTS_HASHES = {
    "b0": (
        "21ecbf6da12460d5c40bb2f29ceb2188",
        "893217f2bb855e2983157299931e43ff",
    ),
    "b1": (
        "069f0534ff22adf035c89e2d9547a9dc",
        "0e80663031ca32d657f9caa404b6ec37",
    ),
    "b2": (
        "424e49f28180edbde1e94797771950a7",
        "1dfe2e7a5d45b6632553a8961ea609eb",
    ),
    "b3": (
        "1f1fc43bd98a6e4fd8fdfd551e02c7a0",
        "f6abf7b5849ac99a89b50dd3fd532856",
    ),
    "-s": (
        "e1d88a8495beba45748fedd0cecbe016",
        "af0682fb74e8c54910f2d4393339c070",
    ),
    "-m": (
        "a3bf6aa3276309f4fc6a34aa114c95cd",
        "1b8dc055df72dde80d614482840fe342",
    ),
    "-l": (
        "27e6d408b53c7ebc868fefa357689935",
        "b0b66b5c863aef5b46e8608fe1711615",
    ),
}
DEFAULT_BLOCKS_ARGS = {
"efficientnetv2-s": [
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 24,
"output_filters": 24,
"expand_ratio": 1,
"se_ratio": 0.0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 4,
"input_filters": 24,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0.0,
"strides": 2,
"conv_type": 1,
},
{
"conv_type": 1,
"expand_ratio": 4,
"input_filters": 48,
"kernel_size": 3,
"num_repeat": 4,
"output_filters": 64,
"se_ratio": 0,
"strides": 2,
},
{
"conv_type": 0,
"expand_ratio": 4,
"input_filters": 64,
"kernel_size": 3,
"num_repeat": 6,
"output_filters": 128,
"se_ratio": 0.25,
"strides": 2,
},
{
"conv_type": 0,
"expand_ratio": 6,
"input_filters": 128,
"kernel_size": 3,
"num_repeat": 9,
"output_filters": 160,
"se_ratio": 0.25,
"strides": 1,
},
{
"conv_type": 0,
"expand_ratio": 6,
"input_filters": 160,
"kernel_size": 3,
"num_repeat": 15,
"output_filters": 256,
"se_ratio": 0.25,
"strides": 2,
},
],
"efficientnetv2-m": [
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 24,
"output_filters": 24,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 24,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 48,
"output_filters": 80,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 80,
"output_filters": 160,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 14,
"input_filters": 160,
"output_filters": 176,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 18,
"input_filters": 176,
"output_filters": 304,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 304,
"output_filters": 512,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
],
"efficientnetv2-l": [
{
"kernel_size": 3,
"num_repeat": 4,
"input_filters": 32,
"output_filters": 32,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 32,
"output_filters": 64,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 64,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 10,
"input_filters": 96,
"output_filters": 192,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 19,
"input_filters": 192,
"output_filters": 224,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 25,
"input_filters": 224,
"output_filters": 384,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 384,
"output_filters": 640,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
],
"efficientnetv2-b0": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b1": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b2": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b3": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
}
# Serialized initializer config for convolution kernels: variance scaling
# with scale 2.0 on fan-out, truncated-normal distribution.
CONV_KERNEL_INITIALIZER = {
    "class_name": "VarianceScaling",
    "config": {
        "scale": 2.0,
        "mode": "fan_out",
        "distribution": "truncated_normal",
    },
}
# Serialized initializer config for dense (classifier) kernels: uniform
# variance scaling with scale 1/3 on fan-out.
DENSE_KERNEL_INITIALIZER = {
    "class_name": "VarianceScaling",
    "config": {
        "scale": 1.0 / 3.0,
        "mode": "fan_out",
        "distribution": "uniform",
    },
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](
https://arxiv.org/abs/2104.00298) (ICML 2021)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For EfficientNetV2, by default input preprocessing is included as a part of
the model (as a `Rescaling` layer), and thus
`keras.applications.efficientnet_v2.preprocess_input` is actually a
pass-through function. In this use case, EfficientNetV2 models expect their
inputs to be float tensors of pixels with values in the `[0, 255]` range.
At the same time, preprocessing as a part of the model (i.e. `Rescaling`
layer) can be disabled by setting `include_preprocessing` argument to `False`.
With preprocessing disabled EfficientNetV2 models expect their inputs to be
float tensors of pixels with values in the `[-1, 1]` range.
Args:
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is `False`.
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `"avg"` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `"max"` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A string or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `"softmax"`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A model instance.
"""
def round_filters(filters, width_coefficient, min_depth, depth_divisor):
    """Scale a filter count by the width multiplier and round it.

    Args:
        filters: Base number of filters.
        width_coefficient: Width multiplier applied to `filters`.
        min_depth: Lower bound for the result; when falsy,
            `depth_divisor` is used as the floor instead.
        depth_divisor: The result is rounded to the nearest multiple of
            this value.

    Returns:
        The scaled filter count as an `int`.
    """
    scaled = filters * width_coefficient
    floor = min_depth or depth_divisor
    # Round to the nearest multiple of `depth_divisor`.
    nearest = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    return int(max(floor, nearest))
def round_repeats(repeats, depth_coefficient):
    """Scale a block repeat count by the depth multiplier, rounding up."""
    scaled = depth_coefficient * repeats
    return int(math.ceil(scaled))
def MBConvBlock(
    input_filters,
    output_filters,
    expand_ratio=1,
    kernel_size=3,
    strides=1,
    se_ratio=0.0,
    bn_momentum=0.9,
    activation="swish",
    survival_probability=0.8,
    name=None,
):
    """MBConv block: Mobile Inverted Residual Bottleneck.

    Returns a closure that applies: 1x1 expansion conv (skipped when
    `expand_ratio == 1`), depthwise conv, optional squeeze-and-excitation,
    and a 1x1 projection, with a stochastic-depth residual connection when
    spatial size and channel count are unchanged.

    Args:
        input_filters: Number of input channels the block expects.
        output_filters: Number of output channels after projection.
        expand_ratio: Channel multiplier for the expansion phase; `1`
            skips the expansion conv entirely.
        kernel_size: Kernel size of the depthwise conv.
        strides: Stride of the depthwise conv.
        se_ratio: Squeeze-and-excitation ratio; SE is applied only when
            `0 < se_ratio <= 1`.
        bn_momentum: Momentum for all `BatchNormalization` layers.
        activation: Activation after the expansion and depthwise convs.
        survival_probability: Value passed as the `Dropout` rate on the
            residual branch. NOTE(review): despite the name, this is used
            as the *drop* rate, not a survival probability — confirm
            against callers (they pass `drop_connect_rate * b / blocks`).
        name: Layer-name prefix; a fresh uid-based prefix is generated
            when `None`.

    Returns:
        A callable `apply(inputs) -> outputs` that builds the block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    if name is None:
        # Unique prefix so repeated anonymous blocks get distinct layer names.
        name = backend.get_uid("block0")
    def apply(inputs):
        # Expansion phase
        filters = input_filters * expand_ratio
        if expand_ratio != 1:
            x = layers.Conv2D(
                filters=filters,
                kernel_size=1,
                strides=1,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                padding="same",
                data_format=backend.image_data_format(),
                use_bias=False,
                name=f"{name}expand_conv",
            )(inputs)
            x = layers.BatchNormalization(
                axis=bn_axis,
                momentum=bn_momentum,
                name=f"{name}expand_bn",
            )(x)
            x = layers.Activation(activation, name=f"{name}expand_activation")(
                x
            )
        else:
            x = inputs
        # Depthwise conv
        x = layers.DepthwiseConv2D(
            kernel_size=kernel_size,
            strides=strides,
            depthwise_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            data_format=backend.image_data_format(),
            use_bias=False,
            name=f"{name}dwconv2",
        )(x)
        x = layers.BatchNormalization(
            axis=bn_axis, momentum=bn_momentum, name=f"{name}bn"
        )(x)
        x = layers.Activation(activation, name=f"{name}activation")(x)
        # Squeeze and excite
        if 0 < se_ratio <= 1:
            # SE bottleneck width derives from the block *input* width,
            # not the expanded width.
            filters_se = max(1, int(input_filters * se_ratio))
            se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x)
            if bn_axis == 1:
                se_shape = (filters, 1, 1)
            else:
                se_shape = (1, 1, filters)
            se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se)
            se = layers.Conv2D(
                filters_se,
                1,
                padding="same",
                activation=activation,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=f"{name}se_reduce",
            )(se)
            se = layers.Conv2D(
                filters,
                1,
                padding="same",
                activation="sigmoid",
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=f"{name}se_expand",
            )(se)
            x = layers.multiply([x, se], name=f"{name}se_excite")
        # Output phase
        x = layers.Conv2D(
            filters=output_filters,
            kernel_size=1,
            strides=1,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            data_format=backend.image_data_format(),
            use_bias=False,
            name=f"{name}project_conv",
        )(x)
        x = layers.BatchNormalization(
            axis=bn_axis, momentum=bn_momentum, name=f"{name}project_bn"
        )(x)
        # Residual only when shape is preserved (stride 1, same channels).
        if strides == 1 and input_filters == output_filters:
            if survival_probability:
                # noise_shape (None, 1, 1, 1) drops whole examples:
                # stochastic depth rather than per-unit dropout.
                x = layers.Dropout(
                    survival_probability,
                    noise_shape=(None, 1, 1, 1),
                    name=f"{name}drop",
                )(x)
            x = layers.add([x, inputs], name=f"{name}add")
        return x
    return apply
def FusedMBConvBlock(
    input_filters,
    output_filters,
    expand_ratio=1,
    kernel_size=3,
    strides=1,
    se_ratio=0.0,
    bn_momentum=0.9,
    activation="swish",
    survival_probability=0.8,
    name=None,
):
    """Fuses the proj conv1x1 and depthwise_conv into a conv2d.

    Like `MBConvBlock`, but the expansion uses a full `kernel_size`
    strided conv instead of 1x1-conv + depthwise-conv. When
    `expand_ratio == 1` the expansion is skipped entirely and the
    projection conv takes over `kernel_size`/`strides`, followed by an
    activation.

    Args:
        input_filters: Number of input channels the block expects.
        output_filters: Number of output channels after projection.
        expand_ratio: Channel multiplier for the expansion phase; `1`
            skips the expansion conv.
        kernel_size: Kernel size of the fused conv.
        strides: Stride of the fused conv.
        se_ratio: Squeeze-and-excitation ratio; SE is applied only when
            `0 < se_ratio <= 1`.
        bn_momentum: Momentum for all `BatchNormalization` layers.
        activation: Activation after the expansion (or projection) conv.
        survival_probability: Value passed as the `Dropout` rate on the
            residual branch. NOTE(review): despite the name, this is used
            as the *drop* rate — confirm against callers.
        name: Layer-name prefix; a fresh uid-based prefix is generated
            when `None`.

    Returns:
        A callable `apply(inputs) -> outputs` that builds the block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    if name is None:
        # Unique prefix so repeated anonymous blocks get distinct layer names.
        name = backend.get_uid("block0")
    def apply(inputs):
        filters = input_filters * expand_ratio
        if expand_ratio != 1:
            # Fused expansion: kernel_size/strides applied here.
            x = layers.Conv2D(
                filters,
                kernel_size=kernel_size,
                strides=strides,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                data_format=backend.image_data_format(),
                padding="same",
                use_bias=False,
                name=f"{name}expand_conv",
            )(inputs)
            x = layers.BatchNormalization(
                axis=bn_axis, momentum=bn_momentum, name=f"{name}expand_bn"
            )(x)
            x = layers.Activation(
                activation=activation, name=f"{name}expand_activation"
            )(x)
        else:
            x = inputs
        # Squeeze and excite
        if 0 < se_ratio <= 1:
            # SE bottleneck width derives from the block *input* width.
            filters_se = max(1, int(input_filters * se_ratio))
            se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x)
            if bn_axis == 1:
                se_shape = (filters, 1, 1)
            else:
                se_shape = (1, 1, filters)
            se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se)
            se = layers.Conv2D(
                filters_se,
                1,
                padding="same",
                activation=activation,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=f"{name}se_reduce",
            )(se)
            se = layers.Conv2D(
                filters,
                1,
                padding="same",
                activation="sigmoid",
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=f"{name}se_expand",
            )(se)
            x = layers.multiply([x, se], name=f"{name}se_excite")
        # Output phase:
        # When there was no expansion, this conv carries kernel_size and
        # strides (the "fused" path); otherwise it is a plain 1x1 projection.
        x = layers.Conv2D(
            output_filters,
            kernel_size=1 if expand_ratio != 1 else kernel_size,
            strides=1 if expand_ratio != 1 else strides,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            use_bias=False,
            name=f"{name}project_conv",
        )(x)
        x = layers.BatchNormalization(
            axis=bn_axis, momentum=bn_momentum, name=f"{name}project_bn"
        )(x)
        if expand_ratio == 1:
            # No expansion activation happened, so activate after projection.
            x = layers.Activation(
                activation=activation, name=f"{name}project_activation"
            )(x)
        # Residual:
        if strides == 1 and input_filters == output_filters:
            if survival_probability:
                # Stochastic depth: per-example drop via noise_shape.
                x = layers.Dropout(
                    survival_probability,
                    noise_shape=(None, 1, 1, 1),
                    name=f"{name}drop",
                )(x)
            x = layers.add([x, inputs], name=f"{name}add")
        return x
    return apply
def EfficientNetV2(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
min_depth=8,
bn_momentum=0.9,
activation="swish",
blocks_args="default",
name="efficientnetv2",
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
weights_name=None,
):
"""Instantiates the EfficientNetV2 architecture using given scaling
coefficients.
Args:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
min_depth: integer, minimum number of filters.
bn_momentum: float. Momentum parameter for Batch Normalization layers.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
name: string, model name.
include_top: whether to include the fully-connected layer at the top of
the network.
weights: one of `None` (random initialization), `"imagenet"`
(pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) or
numpy array to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
is `False`. It should have exactly 3 inputs channels.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the
4D tensor output of the last convolutional layer.
- "avg" means that global average pooling will be applied to
the output of the last convolutional layer,
and thus the output of the model will be a 2D tensor.
- `"max"` means that global max pooling will be applied.
classes: optional number of classes to classify images into,
only to be specified if `include_top` is `True`, and if no `weights`
argument is specified.
classifier_activation: A string or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network.
Defaults to `True`.
Returns:
A model instance.
"""
if blocks_args == "default":
blocks_args = DEFAULT_BLOCKS_ARGS[name]
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
f"Received: weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights="imagenet"` with `include_top`'
" as true, `classes` should be 1000"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = img_input
if include_preprocessing:
# Apply original V1 preprocessing for Bx variants
# if number of channels allows it
num_channels = input_shape[bn_axis - 1]
if name.split("-")[-1].startswith("b") and num_channels == 3:
x = layers.Rescaling(scale=1.0 / 255)(x)
if backend.image_data_format() == "channels_first":
mean = [[[[0.485]], [[0.456]], [[0.406]]]] # shape [1,3,1,1]
variance = [
[[[0.229**2]], [[0.224**2]], [[0.225**2]]]
] # shape [1,3,1,1]
else:
mean = [0.485, 0.456, 0.406]
variance = [0.229**2, 0.224**2, 0.225**2]
x = layers.Normalization(
mean=mean,
variance=variance,
axis=bn_axis,
)(x)
else:
x = layers.Rescaling(scale=1.0 / 128.0, offset=-1)(x)
# Build stem
stem_filters = round_filters(
filters=blocks_args[0]["input_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
use_bias=False,
name="stem_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
name="stem_bn",
)(x)
x = layers.Activation(activation, name="stem_activation")(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(args["num_repeat"] for args in blocks_args))
for i, args in enumerate(blocks_args):
assert args["num_repeat"] > 0
# Update block input and output filters based on depth multiplier.
args["input_filters"] = round_filters(
filters=args["input_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
args["output_filters"] = round_filters(
filters=args["output_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
# Determine which conv type to use:
block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop("conv_type")]
repeats = round_repeats(
repeats=args.pop("num_repeat"), depth_coefficient=depth_coefficient
)
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
args["strides"] = 1
args["input_filters"] = args["output_filters"]
x = block(
activation=activation,
bn_momentum=bn_momentum,
survival_probability=drop_connect_rate * b / blocks,
name=f"block{i + 1}{chr(j + 97)}_",
**args,
)(x)
b += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = layers.Conv2D(
filters=top_filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format=backend.image_data_format(),
use_bias=False,
name="top_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
name="top_bn",
)(x)
x = layers.Activation(activation=activation, name="top_activation")(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
bias_initializer=initializers.Constant(0.0),
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/resnet.py | keras/src/applications/resnet.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Base URL from which the pretrained ResNet weight files are downloaded.
BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/resnet/"
)
# md5 checksums of the pretrained weight files, keyed by architecture name.
# Each value is a (with-top, no-top) pair; `ResNet` below picks index 0 when
# `include_top=True` and index 1 otherwise.
WEIGHTS_HASHES = {
    "resnet50": (
        "2cb95161c43110f7111970584f804107",
        "4d473c1dd8becc155b73f8504c6f6626",
    ),
    "resnet101": (
        "f1aeb4b969a6efcfb50fad2f0c20cfc5",
        "88cf7a10940856eca736dc7b7e228a21",
    ),
    "resnet152": (
        "100835be76be38e30d865e96f2aaae62",
        "ee4c566cf9a93f14d82f913c2dc6dd0c",
    ),
    "resnet50v2": (
        "3ef43a0b657b3be2300d5770ece849e0",
        "fac2f116257151a9d068a22e544a4917",
    ),
    "resnet101v2": (
        "6343647c601c52e1368623803854d971",
        "c0ed64b8031c3730f411d2eb4eea35b5",
    ),
    "resnet152v2": (
        "a49b44d1979771252814e80f8ec446f9",
        "ed17cf2e0169df9d443503ef94b23b33",
    ),
    "resnext50": (
        "67a5b30d522ed92f75a1f16eef299d1a",
        "62527c363bdd9ec598bed41947b379fc",
    ),
    "resnext101": (
        "34fb605428fcc7aa4d62f44404c11509",
        "0f678c91647380debd923963594981b3",
    ),
}
def ResNet(
    stack_fn,
    preact,
    use_bias,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet",
    weights_name=None,
):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
    Args:
        stack_fn: A function that returns output tensor for the
            stacked residual blocks.
        preact: Whether to use pre-activation or not. `True` for ResNetV2,
            `False` for ResNet and ResNeXt.
        use_bias: Whether to use biases for convolutional layers or not.
            `True` for ResNet and ResNetV2, `False` for ResNeXt.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: One of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `"channels_first"` data format). It
            should have exactly 3 inputs channels.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`,
            and if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation
            function to use on the "top" layer. Ignored unless
            `include_top=True`. Set `classifier_activation=None` to
            return the logits of the "top" layer. When loading
            pretrained weights, `classifier_activation` can only be
            `None` or `"softmax"`.
        name: The name of the model (string).
        weights_name: Key into `WEIGHTS_HASHES` used to select the
            pretrained weight file name and its expected md5 hash;
            when not a known key, no hash-verified download happens.
    Returns:
        A Model instance.
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), 'imagenet' "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. Received: "
            f"weights={weights}"
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights='imagenet'` with `include_top=True`, "
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if backend.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1
    # Stem: 7x7/2 conv with explicit padding, then 3x3/2 max-pool.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name="conv1_pad")(
        img_input
    )
    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name="conv1_conv")(x)
    if not preact:
        # v1-style post-activation stem; v2 defers BN/ReLU to the blocks.
        x = layers.BatchNormalization(
            axis=bn_axis, epsilon=1.001e-5, name="conv1_bn"
        )(x)
        x = layers.Activation("relu", name="conv1_relu")(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name="pool1_pad")(x)
    x = layers.MaxPooling2D(3, strides=2, name="pool1_pool")(x)
    x = stack_fn(x)
    if preact:
        # v2 (pre-activation) needs a final BN/ReLU after the last block.
        x = layers.BatchNormalization(
            axis=bn_axis, epsilon=1.001e-5, name="post_bn"
        )(x)
        x = layers.Activation("relu", name="post_relu")(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        # Validate activation for the classifier layer
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Functional(inputs, x, name=name)
    # Load weights.
    if (weights == "imagenet") and (weights_name in WEIGHTS_HASHES):
        if include_top:
            file_name = f"{weights_name}_weights_tf_dim_ordering_tf_kernels.h5"
            file_hash = WEIGHTS_HASHES[weights_name][0]
        else:
            file_name = (
                f"{weights_name}_weights_tf_dim_ordering_tf_kernels_notop.h5"
            )
            file_hash = WEIGHTS_HASHES[weights_name][1]
        weights_path = file_utils.get_file(
            file_name,
            f"{BASE_WEIGHTS_PATH}{file_name}",
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
def residual_block_v1(
    x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
    """A bottleneck residual block for ResNet*_v1 (post-activation).

    Args:
        x: Input tensor.
        filters: Number of filters in the bottleneck layer.
        kernel_size: Kernel size of the bottleneck layer. Defaults to `3`.
        stride: Stride of the first layer. Defaults to `1`.
        conv_shortcut: Use a projection shortcut if `True`, otherwise an
            identity shortcut. Defaults to `True`.
        name: Optional block name, used as the layer-name prefix.

    Returns:
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    def batchnorm(tensor, label):
        # Shared BN settings for every normalization in this block.
        return layers.BatchNormalization(
            axis=bn_axis, epsilon=1.001e-5, name=f"{name}_{label}_bn"
        )(tensor)

    if conv_shortcut:
        # Projection shortcut matches the 4x channel expansion.
        shortcut = layers.Conv2D(
            4 * filters, 1, strides=stride, name=f"{name}_0_conv"
        )(x)
        shortcut = batchnorm(shortcut, "0")
    else:
        shortcut = x

    out = layers.Conv2D(filters, 1, strides=stride, name=f"{name}_1_conv")(x)
    out = batchnorm(out, "1")
    out = layers.Activation("relu", name=f"{name}_1_relu")(out)

    out = layers.Conv2D(
        filters, kernel_size, padding="SAME", name=f"{name}_2_conv"
    )(out)
    out = batchnorm(out, "2")
    out = layers.Activation("relu", name=f"{name}_2_relu")(out)

    out = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(out)
    out = batchnorm(out, "3")

    out = layers.Add(name=f"{name}_add")([shortcut, out])
    return layers.Activation("relu", name=f"{name}_out")(out)
def stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None):
    """Stack `blocks` v1 residual blocks on top of `x`.

    Args:
        x: Input tensor.
        filters: Number of filters in the bottleneck layer of each block.
        blocks: Number of blocks in the stack.
        stride1: Stride of the first layer in the first block. Defaults to `2`.
        name: Stack label, used as the layer-name prefix.

    Returns:
        Output tensor for the stacked blocks.
    """
    # The first block downsamples (stride1) and projects the shortcut.
    out = residual_block_v1(x, filters, stride=stride1, name=f"{name}_block1")
    # Remaining blocks keep the resolution and use identity shortcuts.
    for index in range(2, blocks + 1):
        out = residual_block_v1(
            out, filters, conv_shortcut=False, name=f"{name}_block{index}"
        )
    return out
def residual_block_v2(
    x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None
):
    """A residual block for ResNet*_v2.
    Args:
        x: Input tensor.
        filters: No of filters in the bottleneck layer.
        kernel_size: Kernel size of the bottleneck layer. Defaults to `3`.
        stride: Stride of the first layer. Defaults to `1`.
        conv_shortcut: Use convolution shortcut if `True`, otherwise
            use identity shortcut. Defaults to `False`
        name(optional): Name of the block
    Returns:
        Output tensor for the residual block.
    """
    if backend.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1
    # v2 pre-activation: BN + ReLU are applied *before* the convolutions.
    preact = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=f"{name}_preact_bn"
    )(x)
    preact = layers.Activation("relu", name=f"{name}_preact_relu")(preact)
    if conv_shortcut:
        # Projection shortcut is taken from the pre-activated tensor.
        shortcut = layers.Conv2D(
            4 * filters, 1, strides=stride, name=f"{name}_0_conv"
        )(preact)
    else:
        # Identity shortcut; a 1x1 max-pool only to match a strided main path.
        shortcut = (
            layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
        )
    x = layers.Conv2D(
        filters, 1, strides=1, use_bias=False, name=f"{name}_1_conv"
    )(preact)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
    )(x)
    x = layers.Activation("relu", name=f"{name}_1_relu")(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=f"{name}_2_pad")(x)
    x = layers.Conv2D(
        filters,
        kernel_size,
        strides=stride,
        use_bias=False,
        name=f"{name}_2_conv",
    )(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn"
    )(x)
    x = layers.Activation("relu", name=f"{name}_2_relu")(x)
    # No BN/activation after the last conv: the next block's pre-activation
    # (or the model-level `post_bn`/`post_relu`) handles it.
    x = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(x)
    x = layers.Add(name=f"{name}_out")([shortcut, x])
    return x
def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None):
    """A set of stacked v2 residual blocks.

    Args:
        x: Input tensor.
        filters: Number of filters in the bottleneck layer in a block.
        blocks: Number of blocks in the stacked blocks.
        stride1: Stride of the strided conv in the *last* block of the
            stack. Defaults to `2`.
        name: Stack label.

    Returns:
        Output tensor for the stacked blocks.
    """
    # The first block projects the shortcut to the new channel count.
    x = residual_block_v2(x, filters, conv_shortcut=True, name=f"{name}_block1")
    for i in range(2, blocks):
        x = residual_block_v2(x, filters, name=f"{name}_block{i}")
    # Unlike v1 (which downsamples in the first block of a stack), v2
    # applies the downsampling stride in the last block.
    x = residual_block_v2(
        x, filters, stride=stride1, name=f"{name}_block{blocks}"
    )
    return x
@keras_export(
    [
        "keras.applications.resnet50.ResNet50",
        "keras.applications.resnet.ResNet50",
        "keras.applications.ResNet50",
    ]
)
def ResNet50(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet50",
):
    """Instantiates the ResNet50 architecture."""

    def stack_fn(x):
        # conv2 keeps stride 1 (the stem already downsampled); later
        # stacks downsample in their first block.
        out = stack_residual_blocks_v1(x, 64, 3, stride1=1, name="conv2")
        for filters, blocks, label in (
            (128, 4, "conv3"),
            (256, 6, "conv4"),
            (512, 3, "conv5"),
        ):
            out = stack_residual_blocks_v1(out, filters, blocks, name=label)
        return out

    return ResNet(
        stack_fn,
        preact=False,
        use_bias=True,
        weights_name="resnet50",
        name=name,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export(
    [
        "keras.applications.resnet.ResNet101",
        "keras.applications.ResNet101",
    ]
)
def ResNet101(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet101",
):
    """Instantiates the ResNet101 architecture."""

    def stack_fn(x):
        # Per-stack (filters, repeats); conv4 carries the 23-block body
        # that distinguishes ResNet101 from ResNet50.
        out = stack_residual_blocks_v1(x, 64, 3, stride1=1, name="conv2")
        out = stack_residual_blocks_v1(out, 128, 4, name="conv3")
        out = stack_residual_blocks_v1(out, 256, 23, name="conv4")
        out = stack_residual_blocks_v1(out, 512, 3, name="conv5")
        return out

    return ResNet(
        stack_fn,
        preact=False,
        use_bias=True,
        name=name,
        weights_name="resnet101",
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export(
    [
        "keras.applications.resnet.ResNet152",
        "keras.applications.ResNet152",
    ]
)
def ResNet152(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="resnet152",
):
    """Instantiates the ResNet152 architecture."""

    def stack_fn(x):
        # (filters, repeats, stride1) for stacks conv2..conv5.
        specs = ((64, 3, 1), (128, 8, 2), (256, 36, 2), (512, 3, 2))
        for index, (filters, blocks, stride) in enumerate(specs, start=2):
            x = stack_residual_blocks_v1(
                x, filters, blocks, stride1=stride, name=f"conv{index}"
            )
        return x

    return ResNet(
        stack_fn,
        preact=False,
        use_bias=True,
        name=name,
        weights_name="resnet152",
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
    )
@keras_export(
    [
        "keras.applications.resnet50.preprocess_input",
        "keras.applications.resnet.preprocess_input",
    ]
)
def preprocess_input(x, data_format=None):
    # ResNet v1 weights were trained with "caffe"-style preprocessing:
    # RGB -> BGR conversion plus ImageNet mean subtraction, no scaling.
    return imagenet_utils.preprocess_input(
        x, mode="caffe", data_format=data_format
    )
@keras_export(
    [
        "keras.applications.resnet50.decode_predictions",
        "keras.applications.resnet.decode_predictions",
    ]
)
def decode_predictions(preds, top=5):
    # Delegate to the shared ImageNet class-label decoding utility.
    return imagenet_utils.decode_predictions(
        preds,
        top=top,
    )
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
For image classification use cases, see [this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For ResNet, call `keras.applications.resnet.preprocess_input` on your
inputs before passing them to the model. `resnet.preprocess_input` will convert
the input images from RGB to BGR, then will zero-center each color channel with
respect to the ImageNet dataset, without scaling.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet), or the path to the weights
file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
is `False` (otherwise the input shape has to be `(224, 224, 3)`
(with `"channels_last"` data format) or `(3, 224, 224)`
(with `"channels_first"` data format). It should have exactly 3
inputs channels, and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the output
of the last convolutional block, and thus the output of the
model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is `True`, and if no `weights` argument is
specified. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A Model instance.
"""
setattr(ResNet50, "__doc__", ResNet50.__doc__ + DOC)
setattr(ResNet101, "__doc__", ResNet101.__doc__ + DOC)
setattr(ResNet152, "__doc__", ResNet152.__doc__ + DOC)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/inception_v3.py | keras/src/applications/inception_v3.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Download URLs for the pretrained InceptionV3 weights: the full model and
# the variant without the fully-connected classification head.
WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_export(
    [
        "keras.applications.inception_v3.InceptionV3",
        "keras.applications.InceptionV3",
    ]
)
def InceptionV3(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="inception_v3",
):
    """Instantiates the Inception v3 architecture.

    Reference:
    - [Rethinking the Inception Architecture for Computer Vision](
        http://arxiv.org/abs/1512.00567) (CVPR 2016)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input
    preprocessing. For `InceptionV3`, call
    `keras.applications.inception_v3.preprocess_input` on your inputs
    before passing them to the model.
    `inception_v3.preprocess_input` will scale input pixels between -1 and 1.

    Args:
        include_top: Boolean, whether to include the fully-connected
            layer at the top, as the last layer of the network.
            Defaults to `True`.
        weights: One of `None` (random initialization),
            `imagenet` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
            Defaults to `"imagenet"`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model. `input_tensor` is useful for
            sharing inputs between multiple different networks.
            Defaults to `None`.
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 75.
            E.g. `(150, 150, 3)` would be one valid value.
            `input_shape` will be ignored if the `input_tensor` is provided.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` (default) means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified. Defaults to 1000.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A model instance.
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded; "
            f"Received: weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top=True`, '
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backend.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = 3

    # Stem: downsample from the 299x299 input to 35x35 feature maps.
    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding="valid")
    x = conv2d_bn(x, 32, 3, 3, padding="valid")
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding="valid")
    x = conv2d_bn(x, 192, 3, 3, padding="valid")
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed0",
    )

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed1",
    )

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed2",
    )

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding="valid")

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(
        branch3x3dbl, 96, 3, 3, strides=(2, 2), padding="valid"
    )

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name="mixed3"
    )

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name="mixed4",
    )

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding="same"
        )(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        # f-string for consistency with the `mixed9_{i}` / `mixed{9 + i}`
        # names below; produces the same "mixed5"/"mixed6" layer names.
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name=f"mixed{5 + i}",
        )

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name="mixed7",
    )

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding="valid")

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(
        branch7x7x3, 192, 3, 3, strides=(2, 2), padding="valid"
    )

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name="mixed8"
    )

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate(
            [branch3x3_1, branch3x3_2],
            axis=channel_axis,
            name=f"mixed9_{i}",
        )

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate(
            [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis
        )

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding="same"
        )(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name=f"mixed{9 + i}",
        )

    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if include_top:
            weights_path = file_utils.get_file(
                "inception_v3_weights_tf_dim_ordering_tf_kernels.h5",
                WEIGHTS_PATH,
                cache_subdir="models",
                file_hash="9a0d58056eeedaa3f26cb7ebd46da564",
            )
        else:
            weights_path = file_utils.get_file(
                "inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5",
                WEIGHTS_PATH_NO_TOP,
                cache_subdir="models",
                file_hash="bcbd6486424b2319ff4ef7d526e38f63",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
def conv2d_bn(
    x, filters, num_row, num_col, padding="same", strides=(1, 1), name=None
):
    """Apply a `Conv2D` followed by `BatchNormalization` and ReLU.

    Args:
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_conv'`
            for the convolution and `name + '_bn'` for the
            batch norm layer.

    Returns:
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    conv_name = bn_name = None
    if name is not None:
        conv_name = f"{name}_conv"
        bn_name = f"{name}_bn"

    # BatchNorm axis follows the channel dimension of the data format.
    bn_axis = 1 if backend.image_data_format() == "channels_first" else 3

    conv = layers.Conv2D(
        filters,
        (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,  # no bias: BatchNorm's shift makes it redundant
        name=conv_name,
    )
    norm = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)
    relu = layers.Activation("relu", name=name)
    return relu(norm(conv(x)))
@keras_export("keras.applications.inception_v3.preprocess_input")
def preprocess_input(x, data_format=None):
    # InceptionV3 uses "tf" preprocessing: pixels scaled to [-1, 1].
    return imagenet_utils.preprocess_input(
        x,
        data_format=data_format,
        mode="tf",
    )
@keras_export("keras.applications.inception_v3.decode_predictions")
def decode_predictions(preds, top=5):
    # Delegate to the shared ImageNet helper.
    decoded = imagenet_utils.decode_predictions(preds, top=top)
    return decoded
# Attach the shared imagenet_utils docstrings to the module-level helpers.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/mobilenet.py | keras/src/applications/mobilenet.py | import warnings
from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Base URL for the ImageNet-pretrained MobileNet weight files; the exact
# filename is built from the alpha value and input resolution at load time.
BASE_WEIGHT_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/"
)
@keras_export(
    [
        "keras.applications.mobilenet.MobileNet",
        "keras.applications.MobileNet",
    ]
)
def MobileNet(
    input_shape=None,
    alpha=1.0,
    depth_multiplier=1,
    dropout=1e-3,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name=None,
):
    """Instantiates the MobileNet architecture.

    Reference:
    - [MobileNets: Efficient Convolutional Neural Networks
       for Mobile Vision Applications](
        https://arxiv.org/abs/1704.04861)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input
    preprocessing. For MobileNet, call
    `keras.applications.mobilenet.preprocess_input`
    on your inputs before passing them to the model.
    `mobilenet.preprocess_input` will scale input pixels between -1 and 1.

    Args:
        input_shape: Optional shape tuple, only to be specified if `include_top`
            is `False` (otherwise the input shape has to be `(224, 224, 3)`
            (with `"channels_last"` data format) or `(3, 224, 224)`
            (with `"channels_first"` data format).
            It should have exactly 3 inputs channels, and width and
            height should be no smaller than 32. E.g. `(200, 200, 3)` would
            be one valid value. Defaults to `None`.
            `input_shape` will be ignored if the `input_tensor` is provided.
        alpha: Controls the width of the network. This is known as the width
            multiplier in the MobileNet paper.
            - If `alpha < 1.0`, proportionally decreases the number
                of filters in each layer.
            - If `alpha > 1.0`, proportionally increases the number
                of filters in each layer.
            - If `alpha == 1`, default number of filters from the paper
                are used at each layer. Defaults to `1.0`.
        depth_multiplier: Depth multiplier for depthwise convolution.
            This is called the resolution multiplier in the MobileNet paper.
            Defaults to `1`.
        dropout: Dropout rate. Defaults to `0.001`.
        include_top: Boolean, whether to include the fully-connected layer
            at the top of the network. Defaults to `True`.
        weights: One of `None` (random initialization), `"imagenet"`
            (pre-training on ImageNet), or the path to the weights file
            to be loaded. Defaults to `"imagenet"`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model. `input_tensor` is useful
            for sharing inputs between multiple different networks.
            Defaults to `None`.
        pooling: Optional pooling mode for feature extraction when `include_top`
            is `False`.
            - `None` (default) means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: Optional number of classes to classify images into,
            only to be specified if `include_top` is `True`, and if
            no `weights` argument is specified. Defaults to `1000`.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: String, the name of the model.

    Returns:
        A model instance.

    Raises:
        ValueError: if `weights`, `classes`, `alpha` or `depth_multiplier`
            are inconsistent with each other or with the pretrained weights.
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), 'imagenet' "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. "
            f"Received weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights='imagenet'` with `include_top=True`, "
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape and default size.
    if input_shape is None:
        default_size = 224
    else:
        if backend.image_data_format() == "channels_first":
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if rows == cols and rows in [128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == "imagenet":
        if depth_multiplier != 1:
            raise ValueError(
                "If imagenet weights are being loaded, "
                "depth multiplier must be 1. "
                f"Received depth_multiplier={depth_multiplier}"
            )
        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            # Note the trailing space: this message is built from adjacent
            # string literals, which would otherwise run together.
            raise ValueError(
                "If imagenet weights are being loaded, "
                "alpha can be one of "
                "`0.25`, `0.50`, `0.75` or `1.0` only. "
                f"Received alpha={alpha}"
            )
        if rows != cols or rows not in [128, 160, 192, 224]:
            rows = 224
            warnings.warn(
                "`input_shape` is undefined or non-square, "
                "or `rows` is not in [128, 160, 192, 224]. "
                "Weights for input shape (224, 224) will be "
                "loaded as the default.",
                stacklevel=2,
            )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Backbone: one standard conv followed by 13 depthwise-separable blocks.
    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(
        x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2
    )
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(
        x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4
    )
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(
        x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6
    )
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(
        x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12
    )
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        # Classifier head implemented as a 1x1 conv over the pooled features,
        # matching the original weight layout.
        x = layers.GlobalAveragePooling2D(keepdims=True)(x)
        x = layers.Dropout(dropout, name="dropout")(x)
        x = layers.Conv2D(classes, (1, 1), padding="same", name="conv_preds")(x)
        x = layers.Reshape((classes,), name="reshape_2")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Activation(
            activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if name is None:
        name = f"mobilenet_{alpha:0.2f}_{rows}"
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if alpha == 1.0:
            alpha_text = "1_0"
        elif alpha == 0.75:
            alpha_text = "7_5"
        elif alpha == 0.50:
            alpha_text = "5_0"
        else:
            alpha_text = "2_5"

        if include_top:
            model_name = "mobilenet_%s_%d_tf.h5" % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = file_utils.get_file(
                model_name, weight_path, cache_subdir="models"
            )
        else:
            model_name = "mobilenet_%s_%d_tf_no_top.h5" % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = file_utils.get_file(
                model_name, weight_path, cache_subdir="models"
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Build MobileNet's initial standard convolution block.

    Applies a `Conv2D` (no bias), batch normalization and a ReLU6
    activation, producing the layers named `conv1`, `conv1_bn` and
    `conv1_relu`.

    Args:
        inputs: Input image tensor, `(rows, cols, 3)` with
            `channels_last` data format or `(3, rows, cols)` with
            `channels_first`.
        filters: Integer, number of output filters before scaling.
        alpha: Width multiplier; the effective filter count is
            `int(filters * alpha)`.
        kernel: Integer or tuple/list of 2 integers giving the height and
            width of the convolution window.
        strides: Integer or tuple/list of 2 integers giving the strides of
            the convolution along height and width.

    Returns:
        Output tensor of the block.
    """
    if backend.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = -1

    scaled_filters = int(filters * alpha)
    x = layers.Conv2D(
        scaled_filters,
        kernel,
        padding="same",
        use_bias=False,  # BatchNorm provides the shift, so no conv bias
        strides=strides,
        name="conv1",
    )(inputs)
    x = layers.BatchNormalization(axis=channel_axis, name="conv1_bn")(x)
    x = layers.ReLU(6.0, name="conv1_relu")(x)
    return x
def _depthwise_conv_block(
    inputs,
    pointwise_conv_filters,
    alpha,
    depth_multiplier=1,
    strides=(1, 1),
    block_id=1,
):
    """Build one MobileNet depthwise-separable convolution block.

    The block applies, in order: optional zero padding (only when
    striding), a 3x3 depthwise convolution, batch normalization, ReLU6,
    a 1x1 pointwise convolution, batch normalization and a final ReLU6.

    Args:
        inputs: Input tensor, `(rows, cols, channels)` with
            `channels_last` data format or `(channels, rows, cols)` with
            `channels_first`.
        pointwise_conv_filters: Integer, number of output filters of the
            1x1 pointwise convolution (before scaling by `alpha`).
        alpha: Width multiplier; the effective pointwise filter count is
            `int(pointwise_conv_filters * alpha)`.
        depth_multiplier: Number of depthwise convolution output channels
            per input channel.
        strides: Integer or tuple/list of 2 integers giving the strides of
            the depthwise convolution. Any value other than `(1, 1)`
            downsamples spatially.
        block_id: Integer used to build unique per-block layer names.

    Returns:
        Output tensor of the block.
    """
    if backend.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)
    strided = strides != (1, 1)

    # When downsampling, pad asymmetrically and switch to "valid" padding
    # so output sizes match the reference implementation.
    if strided:
        x = layers.ZeroPadding2D(
            ((0, 1), (0, 1)), name="conv_pad_%d" % block_id
        )(inputs)
    else:
        x = inputs

    x = layers.DepthwiseConv2D(
        (3, 3),
        padding="valid" if strided else "same",
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False,
        name="conv_dw_%d" % block_id,
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="conv_dw_%d_bn" % block_id
    )(x)
    x = layers.ReLU(6.0, name="conv_dw_%d_relu" % block_id)(x)

    x = layers.Conv2D(
        pointwise_conv_filters,
        (1, 1),
        padding="same",
        use_bias=False,
        strides=(1, 1),
        name="conv_pw_%d" % block_id,
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="conv_pw_%d_bn" % block_id
    )(x)
    return layers.ReLU(6.0, name="conv_pw_%d_relu" % block_id)(x)
@keras_export("keras.applications.mobilenet.preprocess_input")
def preprocess_input(x, data_format=None):
    # MobileNet uses "tf" preprocessing: pixels scaled to [-1, 1].
    return imagenet_utils.preprocess_input(
        x,
        data_format=data_format,
        mode="tf",
    )
@keras_export("keras.applications.mobilenet.decode_predictions")
def decode_predictions(preds, top=5):
    # Delegate to the shared ImageNet helper.
    decoded = imagenet_utils.decode_predictions(preds, top=top)
    return decoded
# Attach the shared imagenet_utils docstrings to the module-level helpers.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/vgg19.py | keras/src/applications/vgg19.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Download URLs for the ImageNet-pretrained VGG19 weights, with and
# without the fully-connected classification head ("top").
WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/"
    "keras-applications/vgg19/"
    "vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_export(["keras.applications.vgg19.VGG19", "keras.applications.VGG19"])
def VGG19(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
name="vgg19",
):
"""Instantiates the VGG19 model.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG19, call `keras.applications.vgg19.preprocess_input` on your
inputs before passing them to the model.
`vgg19.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet
dataset, without scaling.
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format) or
`(3, 224, 224)` (with `"channels_first"` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation` can
only be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), 'imagenet' "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. Received: "
f"weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights='imagenet'` with `include_top=True`, "
"`classes` should be 1000. "
f"Received classes={classes}"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv1"
)(img_input)
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block1_pool")(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv1"
)(x)
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block2_pool")(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv1"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv2"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv3"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv4"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block3_pool")(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv3"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv4"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block4_pool")(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv3"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv4"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block5_pool")(x)
if include_top:
# Classification block
x = layers.Flatten(name="flatten")(x)
x = layers.Dense(4096, activation="relu", name="fc1")(x)
x = layers.Dense(4096, activation="relu", name="fc2")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name=name)
# Load weights.
if weights == "imagenet":
if include_top:
weights_path = file_utils.get_file(
"vgg19_weights_tf_dim_ordering_tf_kernels.h5",
WEIGHTS_PATH,
cache_subdir="models",
file_hash="cbe5617147190e668d6c5d5026f83318",
)
else:
weights_path = file_utils.get_file(
"vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
file_hash="253f8cb515780f3b799900260a226db6",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export("keras.applications.vgg19.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="caffe"
)
@keras_export("keras.applications.vgg19.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
# Attach the shared ImageNet preprocessing docstring, specialized for the
# "caffe" mode (RGB->BGR conversion plus mean subtraction) used by VGG19.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
# Reuse the canonical `decode_predictions` docstring verbatim.
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/xception.py | keras/src/applications/xception.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Locations of the pretrained ImageNet weights for Xception, with and
# without the fully-connected classification head.
WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "xception/xception_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_export(
    [
        "keras.applications.xception.Xception",
        "keras.applications.Xception",
    ]
)
def Xception(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="xception",
):
    """Instantiates the Xception architecture.
    Reference:
    - [Xception: Deep Learning with Depthwise Separable Convolutions](
        https://arxiv.org/abs/1610.02357) (CVPR 2017)
    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).
    The default input image size for this model is 299x299.
    Note: each Keras Application expects a specific kind of input preprocessing.
    For Xception, call `keras.applications.xception.preprocess_input`
    on your inputs before passing them to the model.
    `xception.preprocess_input` will scale input pixels between -1 and 1.
    Args:
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)`.
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified. Defaults to `1000`.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation` can
            only be `None` or `"softmax"`.
        name: The name of the model (string).
    Returns:
        A model instance.
    """
    # `weights` must be one of the two known modes or an existing file path.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), 'imagenet' "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    # The pretrained classifier head has exactly 1000 output classes.
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights='imagenet'` with `include_top=True`, "
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=71,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    # Build the input tensor, reusing `input_tensor` when one was given.
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Axis holding channels, needed by the BatchNormalization layers.
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    # --- Entry flow ---
    # Block 1: two plain convolutions (the first one strided).
    x = layers.Conv2D(
        32, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1"
    )(img_input)
    x = layers.BatchNormalization(axis=channel_axis, name="block1_conv1_bn")(x)
    x = layers.Activation("relu", name="block1_conv1_act")(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False, name="block1_conv2")(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block1_conv2_bn")(x)
    x = layers.Activation("relu", name="block1_conv2_act")(x)
    # Blocks 2-4: separable convolutions with strided 1x1 residual shortcuts.
    residual = layers.Conv2D(
        128, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)
    x = layers.SeparableConv2D(
        128, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv1_bn")(
        x
    )
    x = layers.Activation("relu", name="block2_sepconv2_act")(x)
    x = layers.SeparableConv2D(
        128, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv2_bn")(
        x
    )
    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block2_pool"
    )(x)
    x = layers.add([x, residual])
    residual = layers.Conv2D(
        256, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)
    x = layers.Activation("relu", name="block3_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        256, (3, 3), padding="same", use_bias=False, name="block3_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv1_bn")(
        x
    )
    x = layers.Activation("relu", name="block3_sepconv2_act")(x)
    x = layers.SeparableConv2D(
        256, (3, 3), padding="same", use_bias=False, name="block3_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv2_bn")(
        x
    )
    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block3_pool"
    )(x)
    x = layers.add([x, residual])
    residual = layers.Conv2D(
        728, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)
    x = layers.Activation("relu", name="block4_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block4_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv1_bn")(
        x
    )
    x = layers.Activation("relu", name="block4_sepconv2_act")(x)
    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block4_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv2_bn")(
        x
    )
    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block4_pool"
    )(x)
    x = layers.add([x, residual])
    # --- Middle flow ---
    # Eight identical residual blocks (blocks 5-12) at 728 filters.
    for i in range(8):
        residual = x
        prefix = f"block{i + 5}"
        x = layers.Activation("relu", name=f"{prefix}_sepconv1_act")(x)
        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=f"{prefix}_sepconv1",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=f"{prefix}_sepconv1_bn"
        )(x)
        x = layers.Activation("relu", name=f"{prefix}_sepconv2_act")(x)
        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=f"{prefix}_sepconv2",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=f"{prefix}_sepconv2_bn"
        )(x)
        x = layers.Activation("relu", name=f"{prefix}_sepconv3_act")(x)
        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=f"{prefix}_sepconv3",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=f"{prefix}_sepconv3_bn"
        )(x)
        x = layers.add([x, residual])
    # --- Exit flow ---
    # Block 13: final strided residual block.
    residual = layers.Conv2D(
        1024, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)
    x = layers.Activation("relu", name="block13_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block13_sepconv1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block13_sepconv1_bn"
    )(x)
    x = layers.Activation("relu", name="block13_sepconv2_act")(x)
    x = layers.SeparableConv2D(
        1024, (3, 3), padding="same", use_bias=False, name="block13_sepconv2"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block13_sepconv2_bn"
    )(x)
    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block13_pool"
    )(x)
    x = layers.add([x, residual])
    # Block 14: two final separable convolutions, no residual.
    x = layers.SeparableConv2D(
        1536, (3, 3), padding="same", use_bias=False, name="block14_sepconv1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block14_sepconv1_bn"
    )(x)
    x = layers.Activation("relu", name="block14_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        2048, (3, 3), padding="same", use_bias=False, name="block14_sepconv2"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block14_sepconv2_bn"
    )(x)
    x = layers.Activation("relu", name="block14_sepconv2_act")(x)
    # Classification head, or optional global pooling for feature extraction.
    if include_top:
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Functional(inputs, x, name=name)
    # Load weights.
    if weights == "imagenet":
        if include_top:
            weights_path = file_utils.get_file(
                "xception_weights_tf_dim_ordering_tf_kernels.h5",
                WEIGHTS_PATH,
                cache_subdir="models",
                file_hash="0a58e3b7378bc2990ea3b43d5981f1f6",
            )
        else:
            weights_path = file_utils.get_file(
                "xception_weights_tf_dim_ordering_tf_kernels_notop.h5",
                WEIGHTS_PATH_NO_TOP,
                cache_subdir="models",
                file_hash="b0042744bf5b25fce3cb969f33bebb97",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        # `weights` is a user-supplied file path at this point.
        model.load_weights(weights)
    return model
@keras_export("keras.applications.xception.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_export("keras.applications.xception.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
# Attach the shared ImageNet preprocessing docstring, specialized for the
# "tf" mode (pixels scaled to [-1, 1]) used by Xception.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
# Reuse the canonical `decode_predictions` docstring verbatim.
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/nasnet.py | keras/src/applications/nasnet.py | import warnings
from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Locations of the pretrained ImageNet weights for the mobile and large
# NASNet variants, each with and without the classification head.
BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/nasnet/"
)
NASNET_MOBILE_WEIGHT_PATH = f"{BASE_WEIGHTS_PATH}NASNet-mobile.h5"
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}NASNet-mobile-no-top.h5"
NASNET_LARGE_WEIGHT_PATH = f"{BASE_WEIGHTS_PATH}NASNet-large.h5"
NASNET_LARGE_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}NASNet-large-no-top.h5"
def NASNet(
    input_shape=None,
    penultimate_filters=4032,
    num_blocks=6,
    stem_block_filters=96,
    skip_reduction=True,
    filter_multiplier=2,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    default_size=None,
    classifier_activation="softmax",
    name="NASNet",
):
    """Instantiates a NASNet model.
    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)
    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).
    Note: each Keras Application expects a specific kind of input preprocessing.
    For NasNet, call `keras.applications.nasnet.preprocess_input`
    on your inputs before passing them to the model.
    `nasnet.preprocess_input` will scale input pixels between -1 and 1.
    Args:
        input_shape: Optional shape tuple, the input shape
            is by default `(331, 331, 3)` for NASNetLarge and
            `(224, 224, 3)` for NASNetMobile.
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: Number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                - N is the number of blocks
                - P is the number of penultimate filters
        num_blocks: Number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                - N is the number of blocks
                - P is the number of penultimate filters
        stem_block_filters: Number of filters in the initial stem block
        skip_reduction: Whether to skip the reduction step at the tail
            end of the network.
        filter_multiplier: Controls the width of the network.
            - If `filter_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filter_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filter_multiplier` = 1, default number of filters from the
                paper are used at each layer.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        default_size: Specifies the default image size of the model
        classifier_activation: A `str` or callable.
            The activation function to use on the "top" layer.
            Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits
            of the "top" layer. When loading pretrained weights,
            `classifier_activation` can only be `None` or `"softmax"`.
        name: The name of the model (string).
    Returns:
        A model instance.
    """
    # This implementation only supports channels-last tensors.
    if backend.image_data_format() == "channels_first":
        raise ValueError(
            "NASNet does not support the `channels_first` image data "
            "format. Switch to `channels_last` by editing your local "
            "config file at ~/.keras/keras.json"
        )
    # `weights` must be one of the two known modes or an existing file path.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    # The pretrained classifier head has exactly 1000 output classes.
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            "as true, `classes` should be 1000"
        )
    # Pretrained weights require a fully static input shape.
    if (
        isinstance(input_shape, tuple)
        and None in input_shape
        and weights == "imagenet"
    ):
        raise ValueError(
            "When specifying the input shape of a NASNet and loading "
            "`ImageNet` weights, the input_shape argument must be static"
            f" (no None entries). Got: `input_shape={input_shape}`."
        )
    # Fall back to the NASNetLarge resolution when no size was given.
    if default_size is None:
        default_size = 331
    # Determine proper input shape and default size.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    # NOTE(review): this warning looks unreachable -- `channels_first` was
    # already rejected above, leaving only `channels_last` here; confirm
    # before removing.
    if backend.image_data_format() != "channels_last":
        warnings.warn(
            "The NASNet family of models is only available "
            'for the input data format "channels_last" '
            "(width, height, channels). "
            "However your settings specify the default "
            'data format "channels_first" (channels, width, height).'
            ' You should set `image_data_format="channels_last"` '
            "in your Keras config located at ~/.keras/keras.json. "
            "The model being returned right now will expect inputs "
            'to follow the "channels_last" data format.',
            stacklevel=2,
        )
    # Build the input tensor, reusing `input_tensor` when one was given.
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Per-cell filter counts are derived from `penultimate_filters / 24`,
    # so the value must divide evenly after applying the multiplier.
    if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
        raise ValueError(
            "For NASNet-A models, the `penultimate_filters` must be a multiple "
            "of 24 * (`filter_multiplier` ** 2). "
            f"Current value: {penultimate_filters}"
        )
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    filters = penultimate_filters // 24
    # Stem: a single strided 3x3 convolution plus batch norm.
    x = layers.Conv2D(
        stem_block_filters,
        (3, 3),
        strides=(2, 2),
        padding="valid",
        use_bias=False,
        name="stem_conv1",
        kernel_initializer="he_normal",
    )(img_input)
    x = layers.BatchNormalization(
        axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="stem_bn1"
    )(x)
    # `p` carries the previous cell's output; the stem has no predecessor.
    p = None
    # Two stem reduction cells shrink resolution before the first stage.
    x, p = _reduction_a_cell(
        x, p, filters // (filter_multiplier**2), block_id="stem_1"
    )
    x, p = _reduction_a_cell(
        x, p, filters // filter_multiplier, block_id="stem_2"
    )
    # Stage 1: `num_blocks` normal cells at the base filter count.
    for i in range(num_blocks):
        x, p = _normal_a_cell(x, p, filters, block_id=f"{i}")
    # Reduction cell between stages 1 and 2.
    x, p0 = _reduction_a_cell(
        x, p, filters * filter_multiplier, block_id=f"reduce_{num_blocks}"
    )
    # With `skip_reduction`, keep the pre-reduction previous path.
    p = p0 if not skip_reduction else p
    # Stage 2: normal cells at `filter_multiplier` times the base width.
    for i in range(num_blocks):
        x, p = _normal_a_cell(
            x,
            p,
            filters * filter_multiplier,
            block_id=f"{num_blocks + i + 1}",
        )
    # Reduction cell between stages 2 and 3.
    x, p0 = _reduction_a_cell(
        x,
        p,
        filters * filter_multiplier**2,
        block_id=f"reduce_{2 * num_blocks}",
    )
    p = p0 if not skip_reduction else p
    # Stage 3: normal cells at the widest filter count.
    for i in range(num_blocks):
        x, p = _normal_a_cell(
            x,
            p,
            filters * filter_multiplier**2,
            block_id=f"{2 * num_blocks + i + 1}",
        )
    x = layers.Activation("relu")(x)
    # Classification head, or optional global pooling for features.
    if include_top:
        x = layers.GlobalAveragePooling2D()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Functional(inputs, x, name=name)
    # Load weights.
    if weights == "imagenet":
        # Published weights only exist for the mobile (224) and
        # large (331) configurations.
        if default_size == 224:  # mobile version
            if include_top:
                weights_path = file_utils.get_file(
                    "nasnet_mobile.h5",
                    NASNET_MOBILE_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="020fb642bf7360b370c678b08e0adf61",
                )
            else:
                weights_path = file_utils.get_file(
                    "nasnet_mobile_no_top.h5",
                    NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="1ed92395b5b598bdda52abe5c0dbfd63",
                )
            model.load_weights(weights_path)
        elif default_size == 331:  # large version
            if include_top:
                weights_path = file_utils.get_file(
                    "nasnet_large.h5",
                    NASNET_LARGE_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="11577c9a518f0070763c2b964a382f17",
                )
            else:
                weights_path = file_utils.get_file(
                    "nasnet_large_no_top.h5",
                    NASNET_LARGE_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="d81d89dc07e6e56530c4e77faddd61b5",
                )
            model.load_weights(weights_path)
        else:
            raise ValueError(
                "ImageNet weights can only be loaded with NASNetLarge"
                " or NASNetMobile"
            )
    elif weights is not None:
        # `weights` is a user-supplied file path at this point.
        model.load_weights(weights)
    return model
@keras_export(
    [
        "keras.applications.nasnet.NASNetMobile",
        "keras.applications.NASNetMobile",
    ]
)
def NASNetMobile(
    input_shape=None,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="nasnet_mobile",
):
    """Instantiates a Mobile NASNet model in ImageNet mode.
    A thin wrapper around `NASNet` configured as NASNet-A (4 @ 1056),
    the mobile-sized variant, with a default image size of 224x224.
    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)
    Note: each Keras Application expects a specific kind of input
    preprocessing. For NASNet, call
    `keras.applications.nasnet.preprocess_input` on your inputs before
    passing them to the model.
    Args:
        input_shape: Optional shape tuple, only to be specified if
            `include_top` is False. Must be `(224, 224, 3)` when loading
            `imagenet` weights; width and height should be no smaller
            than 32 and there must be exactly 3 input channels.
        include_top: Whether to include the fully-connected layer at the
            top of the network.
        weights: `None` (random initialization) or `imagenet`.
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`) to use as image input for the model.
        pooling: Optional global pooling mode (`None`, `"avg"` or
            `"max"`) applied when `include_top` is `False`.
        classes: Optional number of classes, only to be specified if
            `include_top` is `True` and no `weights` argument is given.
        classifier_activation: A `str` or callable used on the "top"
            layer; ignored unless `include_top=True`. Can only be `None`
            or `"softmax"` when loading pretrained weights.
        name: The name of the model (string).
    Returns:
        A Keras model instance.
    """
    # The torch backend currently cannot run this architecture.
    if backend.backend() == "torch":
        raise ValueError(
            "NASNetMobile is not available with the torch backend "
            "at this time due to an outstanding bug. "
            "If interested, please open a PR."
        )
    # Without the classification head the spatial size is flexible, but
    # default to the canonical mobile resolution.
    if input_shape is None and not include_top:
        input_shape = (224, 224, 3)
    # NASNet-A (4 @ 1056) hyperparameters.
    mobile_config = {
        "penultimate_filters": 1056,
        "num_blocks": 4,
        "stem_block_filters": 32,
        "skip_reduction": False,
        "filter_multiplier": 2,
        "default_size": 224,
    }
    return NASNet(
        input_shape,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
        **mobile_config,
    )
@keras_export(
    [
        "keras.applications.nasnet.NASNetLarge",
        "keras.applications.NASNetLarge",
    ]
)
def NASNetLarge(
    input_shape=None,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="nasnet_large",
):
    """Instantiates a NASNet model in ImageNet mode.
    A thin wrapper around `NASNet` configured as NASNet-A (6 @ 4032),
    the large variant, with a default image size of 331x331.
    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)
    Note: each Keras Application expects a specific kind of input
    preprocessing. For NASNet, call
    `keras.applications.nasnet.preprocess_input` on your inputs before
    passing them to the model.
    Args:
        input_shape: Optional shape tuple, only to be specified if
            `include_top` is False. Must be `(331, 331, 3)` when loading
            `imagenet` weights; width and height should be no smaller
            than 32 and there must be exactly 3 input channels.
        include_top: Whether to include the fully-connected layer at the
            top of the network.
        weights: `None` (random initialization) or `imagenet`.
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`) to use as image input for the model.
        pooling: Optional global pooling mode (`None`, `"avg"` or
            `"max"`) applied when `include_top` is `False`.
        classes: Optional number of classes, only to be specified if
            `include_top` is `True` and no `weights` argument is given.
        classifier_activation: A `str` or callable used on the "top"
            layer; ignored unless `include_top=True`. Can only be `None`
            or `"softmax"` when loading pretrained weights.
        name: The name of the model (string).
    Returns:
        A Keras model instance.
    """
    # NASNet-A (6 @ 4032) hyperparameters.
    large_config = {
        "penultimate_filters": 4032,
        "num_blocks": 6,
        "stem_block_filters": 96,
        "skip_reduction": True,
        "filter_multiplier": 2,
        "default_size": 331,
    }
    return NASNet(
        input_shape,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
        **large_config,
    )
def _separable_conv_block(
    ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None
):
    """Stacks two [relu -> separable conv -> batchnorm] units.
    Args:
        ip: Input tensor.
        filters: Number of output filters for both separable convolutions.
        kernel_size: Kernel size of the separable convolutions.
        strides: Strides of the first convolution (used for downsampling).
        block_id: String identifier embedded in the layer names.
    Returns:
        A Keras tensor.
    """
    bn_axis = 1 if backend.image_data_format() == "channels_first" else -1
    downsample = strides == (2, 2)
    with backend.name_scope(f"separable_conv_block_{block_id}"):
        out = layers.Activation("relu")(ip)
        if downsample:
            # Explicit asymmetric padding keeps the strided convolution's
            # output size consistent with "same" padding.
            out = layers.ZeroPadding2D(
                padding=imagenet_utils.correct_pad(out, kernel_size),
                name=f"separable_conv_1_pad_{block_id}",
            )(out)
        out = layers.SeparableConv2D(
            filters,
            kernel_size,
            strides=strides,
            name=f"separable_conv_1_{block_id}",
            padding="valid" if downsample else "same",
            use_bias=False,
        )(out)
        out = layers.BatchNormalization(
            axis=bn_axis,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"separable_conv_1_bn_{block_id}",
        )(out)
        out = layers.Activation("relu")(out)
        # Second unit always uses unit strides and "same" padding.
        out = layers.SeparableConv2D(
            filters,
            kernel_size,
            name=f"separable_conv_2_{block_id}",
            padding="same",
            use_bias=False,
        )(out)
        out = layers.BatchNormalization(
            axis=bn_axis,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"separable_conv_2_bn_{block_id}",
        )(out)
    return out
def _adjust_block(p, ip, filters, block_id=None):
    """Adjusts the input `previous path` to match the shape of the `input`.

    Used in situations where the output number of filters needs to be
    changed. Three cases are handled: no previous path, a spatial-size
    mismatch (downsample by 2), and a channel-count mismatch (1x1 project).

    Args:
        p: Input tensor which needs to be modified
        ip: Input tensor whose shape needs to be matched
        filters: Number of output filters to be matched
        block_id: String block_id

    Returns:
        Adjusted Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    # Axis holding a spatial dimension, used to detect resolution mismatch.
    img_dim = 2 if backend.image_data_format() == "channels_first" else -2
    with backend.name_scope("adjust_block"):
        if p is None:
            # No previous path yet (first cell): reuse the current input.
            p = ip
        elif p.shape[img_dim] != ip.shape[img_dim]:
            # Spatial mismatch: downsample `p` by 2 using two parallel
            # stride-2 average-pooling paths; the second path is shifted by
            # one pixel (pad then crop) so together they see all positions.
            # Each path projects to filters // 2, concatenated back to
            # `filters` channels.
            with backend.name_scope(f"adjust_reduction_block_{block_id}"):
                p = layers.Activation("relu", name=f"adjust_relu_1_{block_id}")(
                    p
                )
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding="valid",
                    name=f"adjust_avg_pool_1_{block_id}",
                )(p)
                p1 = layers.Conv2D(
                    filters // 2,
                    (1, 1),
                    padding="same",
                    use_bias=False,
                    name=f"adjust_conv_1_{block_id}",
                    kernel_initializer="he_normal",
                )(p1)
                # Shift by one pixel: pad bottom/right, crop top/left.
                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding="valid",
                    name=f"adjust_avg_pool_2_{block_id}",
                )(p2)
                p2 = layers.Conv2D(
                    filters // 2,
                    (1, 1),
                    padding="same",
                    use_bias=False,
                    name=f"adjust_conv_2_{block_id}",
                    kernel_initializer="he_normal",
                )(p2)
                p = layers.concatenate([p1, p2], axis=channel_dim)
                p = layers.BatchNormalization(
                    axis=channel_dim,
                    momentum=0.9997,
                    epsilon=1e-3,
                    name=f"adjust_bn_{block_id}",
                )(p)
        elif p.shape[channel_dim] != filters:
            # Channel mismatch only: 1x1 projection to `filters` channels.
            with backend.name_scope(f"adjust_projection_block_{block_id}"):
                p = layers.Activation("relu")(p)
                p = layers.Conv2D(
                    filters,
                    (1, 1),
                    strides=(1, 1),
                    padding="same",
                    name=f"adjust_conv_projection_{block_id}",
                    use_bias=False,
                    kernel_initializer="he_normal",
                )(p)
                p = layers.BatchNormalization(
                    axis=channel_dim,
                    momentum=0.9997,
                    epsilon=1e-3,
                    name=f"adjust_bn_{block_id}",
                )(p)
    return p
def _normal_a_cell(ip, p, filters, block_id=None):
    """Adds a Normal cell for NASNet-A (Fig. 4 in the paper).

    A Normal cell preserves spatial resolution. Layer names are derived
    from `block_id` and must stay stable for pretrained-weight loading.

    Args:
        ip: Input tensor `x`
        p: Input tensor `p`
        filters: Number of output filters
        block_id: String block_id

    Returns:
        A Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    with backend.name_scope(f"normal_A_block_{block_id}"):
        # Make the previous-cell path match `ip` in resolution/channels.
        p = _adjust_block(p, ip, filters, block_id)
        # relu -> 1x1 conv -> BN on the current input; `h` feeds the branches.
        h = layers.Activation("relu")(ip)
        h = layers.Conv2D(
            filters,
            (1, 1),
            strides=(1, 1),
            padding="same",
            name=f"normal_conv_1_{block_id}",
            use_bias=False,
            kernel_initializer="he_normal",
        )(h)
        h = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"normal_bn_1_{block_id}",
        )(h)
        with backend.name_scope("block_1"):
            x1_1 = _separable_conv_block(
                h,
                filters,
                kernel_size=(5, 5),
                block_id=f"normal_left1_{block_id}",
            )
            x1_2 = _separable_conv_block(
                p, filters, block_id=f"normal_right1_{block_id}"
            )
            x1 = layers.add([x1_1, x1_2], name=f"normal_add_1_{block_id}")
        with backend.name_scope("block_2"):
            x2_1 = _separable_conv_block(
                p, filters, (5, 5), block_id=f"normal_left2_{block_id}"
            )
            x2_2 = _separable_conv_block(
                p, filters, (3, 3), block_id=f"normal_right2_{block_id}"
            )
            x2 = layers.add([x2_1, x2_2], name=f"normal_add_2_{block_id}")
        with backend.name_scope("block_3"):
            x3 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_left3_{block_id}",
            )(h)
            x3 = layers.add([x3, p], name=f"normal_add_3_{block_id}")
        with backend.name_scope("block_4"):
            x4_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_left4_{block_id}",
            )(p)
            x4_2 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_right4_{block_id}",
            )(p)
            x4 = layers.add([x4_1, x4_2], name=f"normal_add_4_{block_id}")
        with backend.name_scope("block_5"):
            x5 = _separable_conv_block(
                h, filters, block_id=f"normal_left5_{block_id}"
            )
            x5 = layers.add([x5, h], name=f"normal_add_5_{block_id}")
        # Concatenate the adjusted previous path and all five branch outputs.
        x = layers.concatenate(
            [p, x1, x2, x3, x4, x5],
            axis=channel_dim,
            name=f"normal_concat_{block_id}",
        )
    # Return (cell output, cell input) so the caller can thread `p` forward.
    return x, ip
def _reduction_a_cell(ip, p, filters, block_id=None):
    """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

    A Reduction cell halves the spatial resolution (stride-2 branches).
    Layer names are derived from `block_id` and must stay stable for
    pretrained-weight loading.

    Args:
        ip: Input tensor `x`
        p: Input tensor `p`
        filters: Number of output filters
        block_id: String block_id

    Returns:
        A Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    with backend.name_scope(f"reduction_A_block_{block_id}"):
        p = _adjust_block(p, ip, filters, block_id)
        # relu -> 1x1 conv -> BN on the current input.
        h = layers.Activation("relu")(ip)
        h = layers.Conv2D(
            filters,
            (1, 1),
            strides=(1, 1),
            padding="same",
            name=f"reduction_conv_1_{block_id}",
            use_bias=False,
            kernel_initializer="he_normal",
        )(h)
        h = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"reduction_bn_1_{block_id}",
        )(h)
        # Pre-padded copy of `h` shared by the stride-2 pooling branches,
        # which use "valid" padding.
        h3 = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(h, 3),
            name=f"reduction_pad_1_{block_id}",
        )(h)
        with backend.name_scope("block_1"):
            x1_1 = _separable_conv_block(
                h,
                filters,
                (5, 5),
                strides=(2, 2),
                block_id=f"reduction_left1_{block_id}",
            )
            x1_2 = _separable_conv_block(
                p,
                filters,
                (7, 7),
                strides=(2, 2),
                block_id=f"reduction_right1_{block_id}",
            )
            x1 = layers.add([x1_1, x1_2], name=f"reduction_add_1_{block_id}")
        with backend.name_scope("block_2"):
            x2_1 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_left2_{block_id}",
            )(h3)
            x2_2 = _separable_conv_block(
                p,
                filters,
                (7, 7),
                strides=(2, 2),
                block_id=f"reduction_right2_{block_id}",
            )
            x2 = layers.add([x2_1, x2_2], name=f"reduction_add_2_{block_id}")
        with backend.name_scope("block_3"):
            x3_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_left3_{block_id}",
            )(h3)
            x3_2 = _separable_conv_block(
                p,
                filters,
                (5, 5),
                strides=(2, 2),
                block_id=f"reduction_right3_{block_id}",
            )
            x3 = layers.add([x3_1, x3_2], name=f"reduction_add3_{block_id}")
        with backend.name_scope("block_4"):
            x4 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"reduction_left4_{block_id}",
            )(x1)
            x4 = layers.add([x2, x4])
        with backend.name_scope("block_5"):
            # NOTE(review): block_5 reuses the `reduction_left4` block_id
            # (presumably a historical typo for "left5" carried over from
            # the upstream implementation). Kept as-is — renaming would
            # break pretrained-weight loading.
            x5_1 = _separable_conv_block(
                x1, filters, (3, 3), block_id=f"reduction_left4_{block_id}"
            )
            x5_2 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_right5_{block_id}",
            )(h3)
            x5 = layers.add([x5_1, x5_2], name=f"reduction_add4_{block_id}")
        # `x1` is consumed by blocks 4/5 but deliberately excluded from the
        # final concat, matching the reference implementation.
        x = layers.concatenate(
            [x2, x3, x4, x5],
            axis=channel_dim,
            name=f"reduction_concat_{block_id}",
        )
        return x, ip
@keras_export("keras.applications.nasnet.preprocess_input")
def preprocess_input(x, data_format=None):
    # NASNet uses "tf"-mode preprocessing (pixels scaled to [-1, 1]);
    # delegate to the shared ImageNet utility.
    kwargs = {"data_format": data_format, "mode": "tf"}
    return imagenet_utils.preprocess_input(x, **kwargs)
@keras_export("keras.applications.nasnet.decode_predictions")
def decode_predictions(preds, top=5):
    # Delegate to the shared ImageNet class-label decoding utility.
    return imagenet_utils.decode_predictions(preds=preds, top=top)
# Attach the shared ImageNet preprocessing/decoding docstrings so the
# exported wrappers are documented consistently across applications.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/applications_test.py | keras/src/applications/applications_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.applications import convnext
from keras.src.applications import densenet
from keras.src.applications import efficientnet
from keras.src.applications import efficientnet_v2
from keras.src.applications import inception_resnet_v2
from keras.src.applications import inception_v3
from keras.src.applications import mobilenet
from keras.src.applications import mobilenet_v2
from keras.src.applications import mobilenet_v3
from keras.src.applications import nasnet
from keras.src.applications import resnet
from keras.src.applications import resnet_v2
from keras.src.applications import vgg16
from keras.src.applications import vgg19
from keras.src.applications import xception
from keras.src.layers import Conv2D
from keras.src.layers import Input
from keras.src.saving import serialization_lib
from keras.src.utils import file_utils
from keras.src.utils import image_utils
# Pillow is optional; image-based tests are skipped when it is missing.
try:
    import PIL
except ImportError:
    PIL = None

# Each entry: (app constructor, channel count of the last feature map,
# module providing `preprocess_input`/`decode_predictions` for that app).
MODEL_LIST = [
    # vgg
    (vgg16.VGG16, 512, vgg16),
    (vgg19.VGG19, 512, vgg19),
    # xception
    (xception.Xception, 2048, xception),
    # inception
    (inception_v3.InceptionV3, 2048, inception_v3),
    (inception_resnet_v2.InceptionResNetV2, 1536, inception_resnet_v2),
    # mobilenet
    (mobilenet.MobileNet, 1024, mobilenet),
    (mobilenet_v2.MobileNetV2, 1280, mobilenet_v2),
    (mobilenet_v3.MobileNetV3Small, 576, mobilenet_v3),
    (mobilenet_v3.MobileNetV3Large, 960, mobilenet_v3),
    # efficientnet
    (efficientnet.EfficientNetB0, 1280, efficientnet),
    (efficientnet.EfficientNetB1, 1280, efficientnet),
    (efficientnet.EfficientNetB2, 1408, efficientnet),
    (efficientnet.EfficientNetB3, 1536, efficientnet),
    (efficientnet.EfficientNetB4, 1792, efficientnet),
    (efficientnet.EfficientNetB5, 2048, efficientnet),
    (efficientnet.EfficientNetB6, 2304, efficientnet),
    (efficientnet.EfficientNetB7, 2560, efficientnet),
    (efficientnet_v2.EfficientNetV2B0, 1280, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2B1, 1280, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2B2, 1408, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2B3, 1536, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2S, 1280, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2M, 1280, efficientnet_v2),
    (efficientnet_v2.EfficientNetV2L, 1280, efficientnet_v2),
    # densenet
    (densenet.DenseNet121, 1024, densenet),
    (densenet.DenseNet169, 1664, densenet),
    (densenet.DenseNet201, 1920, densenet),
    # convnext
    (convnext.ConvNeXtTiny, 768, convnext),
    (convnext.ConvNeXtSmall, 768, convnext),
    (convnext.ConvNeXtBase, 1024, convnext),
    (convnext.ConvNeXtLarge, 1536, convnext),
    (convnext.ConvNeXtXLarge, 2048, convnext),
    # nasnet
    (nasnet.NASNetMobile, 1056, nasnet),
    (nasnet.NASNetLarge, 4032, nasnet),
    # resnet
    (resnet.ResNet50, 2048, resnet),
    (resnet.ResNet101, 2048, resnet),
    (resnet.ResNet152, 2048, resnet),
    (resnet_v2.ResNet50V2, 2048, resnet_v2),
    (resnet_v2.ResNet101V2, 2048, resnet_v2),
    (resnet_v2.ResNet152V2, 2048, resnet_v2),
]

# Model families (matched by substring of the class name) that only
# support the `channels_last` image data format.
MODELS_UNSUPPORTED_CHANNELS_FIRST = ["ConvNeXt", "DenseNet", "NASNet"]

# Add names for `named_parameters`, and add each data format for each model
test_parameters = [
    (
        "{}_{}".format(model[0].__name__, image_data_format),
        *model,
        image_data_format,
    )
    for image_data_format in ["channels_first", "channels_last"]
    for model in MODEL_LIST
]
def _get_elephant(target_size):
    """Download the test elephant image as a batched `(1, H, W, C)` array.

    Models that don't include a Flatten step accept variable-size inputs
    even when loading ImageNet weights; when `target_size` is unspecified
    (its first entry is None), fall back to 299x299.
    """
    url = (
        "https://storage.googleapis.com/tensorflow/"
        "keras-applications/tests/elephant.jpg"
    )
    size = (299, 299) if target_size[0] is None else tuple(target_size)
    local_path = file_utils.get_file("elephant.jpg", url)
    image = image_utils.load_img(local_path, target_size=size)
    array = image_utils.img_to_array(image)
    # Prepend a batch dimension.
    return array[np.newaxis, ...]
@pytest.mark.skipif(
    os.environ.get("SKIP_APPLICATIONS_TESTS"),
    reason="Env variable set to skip.",
)
@pytest.mark.requires_trainable_backend
class ApplicationsTest(testing.TestCase):
    """Smoke tests for the `keras.applications` model constructors.

    Every test is parameterized over `test_parameters`: tuples of
    (app constructor, last feature dim, app module, image data format).
    """

    @classmethod
    def setUpClass(cls):
        # Save the global image data format; tests mutate it, and
        # `tearDownClass` restores it so other test modules are unaffected.
        cls.original_image_data_format = backend.image_data_format()

    @classmethod
    def tearDownClass(cls):
        backend.set_image_data_format(cls.original_image_data_format)

    def skip_if_invalid_image_data_format_for_model(
        self, app, image_data_format
    ):
        """Skip when `app` belongs to a family without channels_first
        support (see MODELS_UNSUPPORTED_CHANNELS_FIRST)."""
        does_not_support_channels_first = any(
            unsupported_name.lower() in app.__name__.lower()
            for unsupported_name in MODELS_UNSUPPORTED_CHANNELS_FIRST
        )
        if (
            image_data_format == "channels_first"
            and does_not_support_channels_first
        ):
            self.skipTest(
                f"{app.__name__} does not support channels first"
            )

    def _skip_if_nasnet_mobile_on_torch(self, app):
        """Skip NASNetMobile on the torch backend.

        Its pretrained weights are known to produce incorrect results
        there. Previously this check was copy-pasted into every test
        method; it is factored out here so the condition lives in one
        place.
        """
        if app == nasnet.NASNetMobile and backend.backend() == "torch":
            self.skipTest(
                "NASNetMobile pretrained incorrect with torch backend."
            )

    @parameterized.named_parameters(test_parameters)
    def test_application_notop_variable_input_channels(
        self, app, last_dim, _, image_data_format
    ):
        self._skip_if_nasnet_mobile_on_torch(app)
        self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
        backend.set_image_data_format(image_data_format)
        # Test compatibility with 1 channel
        if image_data_format == "channels_first":
            input_shape = (1, None, None)
            correct_output_shape = [None, last_dim, None, None]
        else:
            input_shape = (None, None, 1)
            correct_output_shape = [None, None, None, last_dim]
        model = app(weights=None, include_top=False, input_shape=input_shape)
        output_shape = list(model.outputs[0].shape)
        self.assertEqual(output_shape, correct_output_shape)
        # Test compatibility with 4 channels
        if image_data_format == "channels_first":
            input_shape = (4, None, None)
        else:
            input_shape = (None, None, 4)
        model = app(weights=None, include_top=False, input_shape=input_shape)
        output_shape = list(model.outputs[0].shape)
        self.assertEqual(output_shape, correct_output_shape)

    @parameterized.named_parameters(test_parameters)
    @pytest.mark.skipif(PIL is None, reason="Requires PIL.")
    def test_application_base(self, app, _, app_module, image_data_format):
        import tensorflow as tf

        self._skip_if_nasnet_mobile_on_torch(app)
        if (
            image_data_format == "channels_first"
            and len(tf.config.list_physical_devices("GPU")) == 0
            and backend.backend() == "tensorflow"
        ):
            self.skipTest(
                "Conv2D doesn't support channels_first using CPU with "
                "tensorflow backend"
            )
        self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
        backend.set_image_data_format(image_data_format)
        # Can be instantiated with default arguments
        model = app(weights="imagenet")
        # Can run a correct inference on a test image
        if image_data_format == "channels_first":
            shape = model.input_shape[2:4]
        else:
            shape = model.input_shape[1:3]
        x = _get_elephant(shape)
        x = app_module.preprocess_input(x)
        preds = model.predict(x)
        names = [p[1] for p in app_module.decode_predictions(preds)[0]]
        # Test correct label is in top 3 (weak correctness test).
        self.assertIn("African_elephant", names[:3])
        # Can be serialized and deserialized
        config = serialization_lib.serialize_keras_object(model)
        reconstructed_model = serialization_lib.deserialize_keras_object(config)
        self.assertEqual(len(model.weights), len(reconstructed_model.weights))

    @parameterized.named_parameters(test_parameters)
    def test_application_notop_custom_input_shape(
        self, app, last_dim, _, image_data_format
    ):
        self._skip_if_nasnet_mobile_on_torch(app)
        self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
        backend.set_image_data_format(image_data_format)
        if image_data_format == "channels_first":
            input_shape = (3, 123, 123)
            last_dim_axis = 1
        else:
            input_shape = (123, 123, 3)
            last_dim_axis = -1
        model = app(weights=None, include_top=False, input_shape=input_shape)
        output_shape = list(model.outputs[0].shape)
        self.assertEqual(output_shape[last_dim_axis], last_dim)

    @parameterized.named_parameters(test_parameters)
    def test_application_notop_custom_input_tensor(
        self, app, last_dim, _, image_data_format
    ):
        self._skip_if_nasnet_mobile_on_torch(app)
        self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
        backend.set_image_data_format(image_data_format)
        if image_data_format == "channels_first":
            input_shape = (4, 123, 123)
            last_dim_axis = 1
        else:
            input_shape = (123, 123, 4)
            last_dim_axis = -1
        # Feed a non-Input tensor to exercise the `input_tensor` code path.
        inputs_custom = Input(shape=input_shape, name="custom_input")
        inputs_custom = Conv2D(3, (2, 2), padding="valid", strides=(2, 2))(
            inputs_custom
        )
        model = app(weights=None, include_top=False, input_tensor=inputs_custom)
        output_shape = list(model.outputs[0].shape)
        self.assertEqual(output_shape[last_dim_axis], last_dim)

    @parameterized.named_parameters(test_parameters)
    def test_application_pooling(self, app, last_dim, _, image_data_format):
        self._skip_if_nasnet_mobile_on_torch(app)
        self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
        backend.set_image_data_format(image_data_format)
        model = app(weights=None, include_top=False, pooling="max")
        output_shape = list(model.outputs[0].shape)
        self.assertEqual(output_shape, [None, last_dim])

    @parameterized.named_parameters(test_parameters)
    def test_application_classifier_activation(self, app, *_):
        self._skip_if_nasnet_mobile_on_torch(app)
        model = app(
            weights=None, include_top=True, classifier_activation="softmax"
        )
        last_layer_act = model.layers[-1].activation.__name__
        self.assertEqual(last_layer_act, "softmax")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/convnext.py | keras/src/applications/convnext.py | import numpy as np
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import random
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.layers.layer import Layer
from keras.src.models import Functional
from keras.src.models import Sequential
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/convnext/"
)

# Per-variant SHA-256 file hashes, as a pair:
# (weights with classification top, weights without top).
WEIGHTS_HASHES = {
    "convnext_tiny": (
        "8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff",
        "d547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1",
    ),
    "convnext_small": (
        "ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c",
        "6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab",
    ),
    "convnext_base": (
        "52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6",
        "40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45",
    ),
    "convnext_large": (
        "070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6",
        "96f02b6f0753d4f543261bc9d09bed650f24dd6bc02ddde3066135b63d23a1cd",
    ),
    "convnext_xlarge": (
        "c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee",
        "de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05",
    ),
}

# Architecture hyperparameters per variant: number of blocks per stage
# (`depths`), channel width per stage (`projection_dims`), and the default
# input resolution.
MODEL_CONFIGS = {
    "tiny": {
        "depths": [3, 3, 9, 3],
        "projection_dims": [96, 192, 384, 768],
        "default_size": 224,
    },
    "small": {
        "depths": [3, 3, 27, 3],
        "projection_dims": [96, 192, 384, 768],
        "default_size": 224,
    },
    "base": {
        "depths": [3, 3, 27, 3],
        "projection_dims": [128, 256, 512, 1024],
        "default_size": 224,
    },
    "large": {
        "depths": [3, 3, 27, 3],
        "projection_dims": [192, 384, 768, 1536],
        "default_size": 224,
    },
    "xlarge": {
        "depths": [3, 3, 27, 3],
        "projection_dims": [256, 512, 1024, 2048],
        "default_size": 224,
    },
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
References:
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
(CVPR 2022)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The `base`, `large`, and `xlarge` models were first pre-trained on the
ImageNet-21k dataset and then fine-tuned on the ImageNet-1k dataset. The
pre-trained parameters of the models were assembled from the
[official repository](https://github.com/facebookresearch/ConvNeXt). To get a
sense of how these parameters were converted to Keras compatible parameters,
please refer to
[this repository](https://github.com/sayakpaul/keras-convnext-conversion).
Note: Each Keras Application expects a specific kind of input preprocessing.
For ConvNeXt, preprocessing is included in the model using a `Normalization`
layer. ConvNeXt models expect their inputs to be float or uint8 tensors of
pixels with values in the [0-255] range.
When calling the `summary()` method after instantiating a ConvNeXt model,
prefer setting the `expand_nested` argument `summary()` to `True` to better
investigate the instantiated model.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet-1k), or the path to the weights
file to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is `False`.
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `"softmax"`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A model instance.
"""
class StochasticDepth(Layer):
    """Stochastic Depth (a.k.a. DropPath) regularization layer.

    Performs batch-wise dropping rather than sample-wise: in libraries
    like `timm`, the similar `DropPath` layer drops residual paths per
    sample.

    References:
    - https://github.com/rwightman/pytorch-image-models

    Args:
        drop_path_rate (float): Probability of dropping paths. Should be
            within [0, 1].

    Returns:
        Tensor either with the residual path dropped or kept.
    """

    def __init__(self, drop_path_rate, **kwargs):
        super().__init__(**kwargs)
        self.drop_path_rate = drop_path_rate

    def call(self, x, training=None):
        if not training:
            # Inference: identity.
            return x
        keep_prob = 1 - self.drop_path_rate
        # Mask broadcastable over everything but the batch axis.
        mask_shape = (ops.shape(x)[0],) + (1,) * (len(ops.shape(x)) - 1)
        mask = ops.floor(keep_prob + random.uniform(mask_shape, 0, 1))
        # Rescale kept activations so the expected value is unchanged.
        return (x / keep_prob) * mask

    def get_config(self):
        base = super().get_config()
        base.update({"drop_path_rate": self.drop_path_rate})
        return base
class LayerScale(Layer):
    """Per-channel learnable scaling (LayerScale).

    References:
    - https://arxiv.org/abs/2103.17239

    Args:
        init_values (float): Initial value for layer scale. Should be
            within [0, 1].
        projection_dim (int): Projection dimensionality.

    Returns:
        Tensor multiplied to the scale.
    """

    def __init__(self, init_values, projection_dim, **kwargs):
        super().__init__(**kwargs)
        self.init_values = init_values
        self.projection_dim = projection_dim

    def build(self, _):
        # One trainable scale per channel, initialized to a small constant.
        self.gamma = self.add_weight(
            shape=(self.projection_dim,),
            initializer=initializers.Constant(self.init_values),
            trainable=True,
        )

    def call(self, x):
        return x * self.gamma

    def get_config(self):
        base = super().get_config()
        base.update(
            {
                "init_values": self.init_values,
                "projection_dim": self.projection_dim,
            }
        )
        return base
def ConvNeXtBlock(
    projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None
):
    """ConvNeXt block.

    References:
    - https://arxiv.org/abs/2201.03545
    - https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py

    Notes:
        In the original ConvNeXt implementation (linked above), the authors
        use `Dense` layers for pointwise convolutions for increased
        efficiency. Following that, this implementation also uses the same.
        `Dense` acting as a 1x1 conv relies on channels being on the last
        axis (`channels_last`).

    Args:
        projection_dim (int): Number of filters for convolution layers. In
            the ConvNeXt paper, this is referred to as projection dimension.
        drop_path_rate (float): Probability of dropping paths. Should be
            within [0, 1].
        layer_scale_init_value (float): Layer scale value.
            Should be a small float number.
        name: name to path to the keras layer.

    Returns:
        A function representing a ConvNeXtBlock block.
    """
    if name is None:
        # NOTE(review): auto-generated names use the "prestem" uid namespace
        # (matches the upstream implementation) — presumably a historical
        # quirk; layer names affect pretrained-weight loading, so kept as-is.
        name = f"prestem{str(backend.get_uid('prestem'))}"

    def apply(inputs):
        x = inputs
        # 7x7 depthwise conv (groups == channels).
        x = layers.Conv2D(
            filters=projection_dim,
            kernel_size=7,
            padding="same",
            groups=projection_dim,
            name=f"{name}_depthwise_conv",
        )(x)
        x = layers.LayerNormalization(epsilon=1e-6, name=f"{name}_layernorm")(x)
        # Inverted bottleneck: expand 4x, GELU, project back.
        x = layers.Dense(4 * projection_dim, name=f"{name}_pointwise_conv_1")(x)
        x = layers.Activation("gelu", name=f"{name}_gelu")(x)
        x = layers.Dense(projection_dim, name=f"{name}_pointwise_conv_2")(x)
        if layer_scale_init_value is not None:
            x = LayerScale(
                layer_scale_init_value,
                projection_dim,
                name=f"{name}_layer_scale",
            )(x)
        # Residual branch is either stochastically dropped or passed through
        # a named identity (keeps layer counts/names stable across configs).
        if drop_path_rate:
            layer = StochasticDepth(
                drop_path_rate, name=f"{name}_stochastic_depth"
            )
        else:
            layer = layers.Activation("linear", name=f"{name}_identity")
        return inputs + layer(x)

    return apply
def PreStem(name=None):
    """Normalizes inputs with ImageNet-1k mean and std."""
    if name is None:
        uid = backend.get_uid("prestem")
        name = f"prestem{uid}"

    def apply(x):
        # Channel-wise normalization using ImageNet-1k statistics expressed
        # in the [0, 255] pixel domain.
        normalization = layers.Normalization(
            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
            variance=[
                (0.229 * 255) ** 2,
                (0.224 * 255) ** 2,
                (0.225 * 255) ** 2,
            ],
            name=f"{name}_prestem_normalization",
        )
        return normalization(x)

    return apply
def Head(num_classes=1000, classifier_activation=None, name=None):
    """Implementation of classification head of ConvNeXt.

    Args:
        num_classes: number of classes for Dense layer
        classifier_activation: activation function for the Dense layer
        name: name prefix

    Returns:
        Classification head function.
    """
    if name is None:
        name = str(backend.get_uid("head"))

    def apply(x):
        # Global average pool -> layer norm -> linear classifier.
        pooled = layers.GlobalAveragePooling2D(name=f"{name}_head_gap")(x)
        normed = layers.LayerNormalization(
            epsilon=1e-6, name=f"{name}_head_layernorm"
        )(pooled)
        return layers.Dense(
            num_classes,
            activation=classifier_activation,
            name=f"{name}_head_dense",
        )(normed)

    return apply
def ConvNeXt(
    depths,
    projection_dims,
    drop_path_rate=0.0,
    layer_scale_init_value=1e-6,
    default_size=224,
    name="convnext",
    include_preprocessing=True,
    include_top=True,
    weights=None,
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    weights_name=None,
):
    """Instantiates ConvNeXt architecture given specific configuration.

    Args:
        depths: An iterable containing depths for each individual stages.
        projection_dims: An iterable containing output number of channels of
            each individual stages.
        drop_path_rate: Stochastic depth probability. If 0.0, then stochastic
            depth won't be used.
        layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling
            won't be used.
        default_size: Default input image size.
        name: An optional name for the model.
        include_preprocessing: boolean denoting whether to
            include preprocessing in the model.
            When `weights="imagenet"` this should always be `True`.
            But for other models (e.g., randomly initialized) you should set
            it to `False` and apply preprocessing to data accordingly.
        include_top: Boolean denoting whether to include classification
            head to the model.
        weights: one of `None` (random initialization), `"imagenet"`
            (pre-training on ImageNet-1k),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified if
            `include_top` is `False`. It should have exactly 3 inputs
            channels.
        pooling: optional pooling mode for feature extraction when
            `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `avg` means that global average pooling will be applied
                to the output of the last convolutional layer,
                and thus the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images into,
            only to be specified if `include_top` is `True`,
            and if no `weights` argument is specified.
        classifier_activation: A `str` or callable.
            The activation function to use
            on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits
            of the "top" layer.
        weights_name: Name of the pretrained-weights variant (e.g.
            `"convnext_tiny"`). Used when `weights="imagenet"` to validate
            the architecture configuration and select the weights file/hash.

    Raises:
        ValueError: if the image data format is `channels_first`, if
            `weights` is invalid, or if the architecture/name does not match
            the requested pretrained variant.

    Returns:
        A model instance.
    """
    # ConvNeXt uses `Dense` layers as pointwise convs (act on last axis),
    # so only `channels_last` is supported.
    if backend.image_data_format() == "channels_first":
        raise ValueError(
            "ConvNeXt does not support the `channels_first` image data "
            "format. Switch to `channels_last` by editing your local "
            "config file at ~/.keras/keras.json"
        )
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top=True`, '
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )
    # Determine proper input shape.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Root the model at the true source of `input_tensor` when one is given.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)[0]
        x = input_tensor
    else:
        inputs = img_input
        x = inputs
    if include_preprocessing:
        channel_axis = (
            3 if backend.image_data_format() == "channels_last" else 1
        )
        num_channels = input_shape[channel_axis - 1]
        # Normalization stats are ImageNet RGB; only applied for 3 channels.
        if num_channels == 3:
            x = PreStem(name=name)(x)
    # Stem block.
    stem = Sequential(
        [
            layers.Conv2D(
                projection_dims[0],
                kernel_size=4,
                strides=4,
                name=f"{name}_stem_conv",
            ),
            layers.LayerNormalization(
                epsilon=1e-6, name=f"{name}_stem_layernorm"
            ),
        ],
        name=f"{name}_stem",
    )
    # Downsampling blocks.
    downsample_layers = []
    downsample_layers.append(stem)
    num_downsample_layers = 3
    for i in range(num_downsample_layers):
        downsample_layer = Sequential(
            [
                layers.LayerNormalization(
                    epsilon=1e-6,
                    name=f"{name}_downsampling_layernorm_{i}",
                ),
                layers.Conv2D(
                    projection_dims[i + 1],
                    kernel_size=2,
                    strides=2,
                    name=f"{name}_downsampling_conv_{i}",
                ),
            ],
            name=f"{name}_downsampling_block_{i}",
        )
        downsample_layers.append(downsample_layer)
    # Stochastic depth schedule.
    # This is referred from the original ConvNeXt codebase:
    # https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
    depth_drop_rates = [
        float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))
    ]
    # First apply downsampling blocks and then apply ConvNeXt stages.
    cur = 0
    num_convnext_blocks = 4
    for i in range(num_convnext_blocks):
        x = downsample_layers[i](x)
        for j in range(depths[i]):
            x = ConvNeXtBlock(
                projection_dim=projection_dims[i],
                drop_path_rate=depth_drop_rates[cur + j],
                layer_scale_init_value=layer_scale_init_value,
                name=name + f"_stage_{i}_block_{j}",
            )(x)
        cur += depths[i]
    if include_top:
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = Head(
            num_classes=classes,
            classifier_activation=classifier_activation,
            name=name,
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
        x = layers.LayerNormalization(epsilon=1e-6)(x)
    model = Functional(inputs=inputs, outputs=x, name=name)
    # Validate weights before requesting them from the API
    if weights == "imagenet":
        expected_config = MODEL_CONFIGS[weights_name.split("convnext_")[-1]]
        if (
            depths != expected_config["depths"]
            or projection_dims != expected_config["projection_dims"]
        ):
            raise ValueError(
                f"Architecture configuration does not match {weights_name} "
                f"variant. When using pre-trained weights, the model "
                f"architecture must match the pre-trained configuration "
                f"exactly. Expected depths: {expected_config['depths']}, "
                f"got: {depths}. Expected projection_dims: "
                f"{expected_config['projection_dims']}, got: {projection_dims}."
            )
        if weights_name not in name:
            raise ValueError(
                f'Model name "{name}" does not match weights variant '
                f'"{weights_name}". When using imagenet weights, model name '
                f'must contain the weights variant (e.g., "convnext_'
                f'{weights_name.split("convnext_")[-1]}").'
            )
    # Load weights.
    if weights == "imagenet":
        # Index 0 is the with-top hash, index 1 the notop hash.
        if include_top:
            file_suffix = ".h5"
            file_hash = WEIGHTS_HASHES[weights_name][0]
        else:
            file_suffix = "_notop.h5"
            file_hash = WEIGHTS_HASHES[weights_name][1]
        file_name = name + file_suffix
        weights_path = file_utils.get_file(
            file_name,
            BASE_WEIGHTS_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
## Instantiating variants ##
@keras_export(
    [
        "keras.applications.convnext.ConvNeXtTiny",
        "keras.applications.ConvNeXtTiny",
    ]
)
def ConvNeXtTiny(
    include_top=True,
    include_preprocessing=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="convnext_tiny",
):
    # Resolve the architecture hyperparameters for the "tiny" variant once,
    # then delegate construction to the shared ConvNeXt builder.
    config = MODEL_CONFIGS["tiny"]
    return ConvNeXt(
        weights_name="convnext_tiny",
        depths=config["depths"],
        projection_dims=config["projection_dims"],
        default_size=config["default_size"],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        include_top=include_top,
        include_preprocessing=include_preprocessing,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.convnext.ConvNeXtSmall",
        "keras.applications.ConvNeXtSmall",
    ]
)
def ConvNeXtSmall(
    include_top=True,
    include_preprocessing=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="convnext_small",
):
    # Resolve the architecture hyperparameters for the "small" variant once,
    # then delegate construction to the shared ConvNeXt builder.
    config = MODEL_CONFIGS["small"]
    return ConvNeXt(
        weights_name="convnext_small",
        depths=config["depths"],
        projection_dims=config["projection_dims"],
        default_size=config["default_size"],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        include_top=include_top,
        include_preprocessing=include_preprocessing,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.convnext.ConvNeXtBase",
        "keras.applications.ConvNeXtBase",
    ]
)
def ConvNeXtBase(
    include_top=True,
    include_preprocessing=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="convnext_base",
):
    # Resolve the architecture hyperparameters for the "base" variant once,
    # then delegate construction to the shared ConvNeXt builder.
    config = MODEL_CONFIGS["base"]
    return ConvNeXt(
        weights_name="convnext_base",
        depths=config["depths"],
        projection_dims=config["projection_dims"],
        default_size=config["default_size"],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        include_top=include_top,
        include_preprocessing=include_preprocessing,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.convnext.ConvNeXtLarge",
        "keras.applications.ConvNeXtLarge",
    ]
)
def ConvNeXtLarge(
    include_top=True,
    include_preprocessing=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="convnext_large",
):
    # Resolve the architecture hyperparameters for the "large" variant once,
    # then delegate construction to the shared ConvNeXt builder.
    config = MODEL_CONFIGS["large"]
    return ConvNeXt(
        weights_name="convnext_large",
        depths=config["depths"],
        projection_dims=config["projection_dims"],
        default_size=config["default_size"],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        include_top=include_top,
        include_preprocessing=include_preprocessing,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.convnext.ConvNeXtXLarge",
        "keras.applications.ConvNeXtXLarge",
    ]
)
def ConvNeXtXLarge(
    include_top=True,
    include_preprocessing=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="convnext_xlarge",
):
    # Resolve the architecture hyperparameters for the "xlarge" variant once,
    # then delegate construction to the shared ConvNeXt builder.
    config = MODEL_CONFIGS["xlarge"]
    return ConvNeXt(
        weights_name="convnext_xlarge",
        depths=config["depths"],
        projection_dims=config["projection_dims"],
        default_size=config["default_size"],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        include_top=include_top,
        include_preprocessing=include_preprocessing,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
# Attach the shared application docstring template to each variant
# constructor, substituting the variant's class name.
ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny")
ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall")
ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase")
ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge")
ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge")
@keras_export("keras.applications.convnext.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder method for backward compatibility.

    The preprocessing logic has been included in the convnext model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and is only kept as
    a placeholder to align the API surface between old and new versions of
    the model.

    Args:
        x: A floating point `numpy.array` or a tensor.
        data_format: Optional data format of the image tensor/array.
            Defaults to None, in which case the global setting
            `keras.backend.image_data_format()` is used
            (unless you changed it, it defaults to `"channels_last"`).

    Returns:
        Unchanged `numpy.array` or tensor.
    """
    # Deliberate no-op: ConvNeXt embeds its own preprocessing layers.
    # Fixed: the previous docstring leaked a raw "{mode}" template
    # placeholder because it was copied from PREPROCESS_INPUT_DOC but never
    # run through str.format().
    return x
@keras_export("keras.applications.convnext.decode_predictions")
def decode_predictions(preds, top=5):
    # Thin wrapper: delegates to the shared ImageNet decoding utility.
    return imagenet_utils.decode_predictions(preds, top=top)


# Reuse the canonical docstring from the shared utility.
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/imagenet_utils.py | keras/src/applications/imagenet_utils.py | import json
import warnings
import numpy as np
from keras.src import activations
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.utils import file_utils
# Lazily-populated cache of the ImageNet class index. Loaded on first use by
# `decode_predictions`; maps stringified class ids to entries that decode to
# (class_name, class_description) pairs.
CLASS_INDEX = None
# Remote location of the JSON file that backs `CLASS_INDEX`.
CLASS_INDEX_PATH = (
    "https://storage.googleapis.com/download.tensorflow.org/"
    "data/imagenet_class_index.json"
)
PREPROCESS_INPUT_DOC = """
Preprocesses a tensor or Numpy array encoding a batch of images.
Usage example with `applications.MobileNet`:
```python
i = keras.layers.Input([None, None, 3], dtype="uint8")
x = ops.cast(i, "float32")
x = keras.applications.mobilenet.preprocess_input(x)
core = keras.applications.MobileNet()
x = core(x)
model = keras.Model(inputs=[i], outputs=[x])
result = model(image)
```
Args:
x: A floating point `numpy.array` or a backend-native tensor,
3D or 4D with 3 color
channels, with values in the range [0, 255].
The preprocessed data are written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Optional data format of the image tensor/array. None, means
the global setting `keras.backend.image_data_format()` is used
(unless you changed it, it uses "channels_last").{mode}
Defaults to `None`.
Returns:
Preprocessed array with type `float32`.
{ret}
Raises:
{error}
"""
PREPROCESS_INPUT_MODE_DOC = """
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Defaults to `"caffe"`.
"""
PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
ValueError: In case of unknown `mode` or `data_format` argument."""
PREPROCESS_INPUT_ERROR_DOC = """
ValueError: In case of unknown `data_format` argument."""
PREPROCESS_INPUT_RET_DOC_TF = """
The inputs pixel values are scaled between -1 and 1, sample-wise."""
PREPROCESS_INPUT_RET_DOC_TORCH = """
The input pixels values are scaled between 0 and 1 and each channel is
normalized with respect to the ImageNet dataset."""
PREPROCESS_INPUT_RET_DOC_CAFFE = """
The images are converted from RGB to BGR, then each color channel is
zero-centered with respect to the ImageNet dataset, without scaling."""
@keras_export("keras.applications.imagenet_utils.preprocess_input")
def preprocess_input(x, data_format=None, mode="caffe"):
    """Preprocesses a tensor or Numpy array encoding a batch of images."""
    # Validate `mode` first so an unknown mode always raises, regardless of
    # the data format.
    if mode not in ("caffe", "tf", "torch"):
        raise ValueError(
            "Expected mode to be one of `caffe`, `tf` or `torch`. "
            f"Received: mode={mode}"
        )

    if data_format is None:
        data_format = backend.image_data_format()
    elif data_format not in ("channels_first", "channels_last"):
        raise ValueError(
            "Expected data_format to be one of `channels_first` or "
            f"`channels_last`. Received: data_format={data_format}"
        )

    # Dispatch on input type: NumPy arrays take the in-place NumPy path,
    # everything else goes through the backend tensor path.
    if isinstance(x, np.ndarray):
        preprocessor = _preprocess_numpy_input
    else:
        preprocessor = _preprocess_tensor_input
    return preprocessor(x, data_format=data_format, mode=mode)
# Fill in the docstring template for the generic (multi-mode) entry point:
# document the `mode` parameter and the mode-dependent error section.
preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
    mode=PREPROCESS_INPUT_MODE_DOC,
    ret="",
    error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC,
)
@keras_export("keras.applications.imagenet_utils.decode_predictions")
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Args:
        preds: NumPy array encoding a batch of predictions.
        top: Integer, how many top-guesses to return. Defaults to `5`.

    Returns:
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`.
        One list of tuples per sample in batch input.

    Raises:
        ValueError: In case of invalid shape of the `pred` array
            (must be 2D).
    """
    global CLASS_INDEX

    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(
            "`decode_predictions` expects "
            "a batch of predictions "
            "(i.e. a 2D array of shape (samples, 1000)). "
            f"Received array with shape: {preds.shape}"
        )
    # Download and cache the class index on first use only.
    if CLASS_INDEX is None:
        index_path = file_utils.get_file(
            "imagenet_class_index.json",
            CLASS_INDEX_PATH,
            cache_subdir="models",
            file_hash="c2c37ea517e94d9795004a39431a14cb",
        )
        with open(index_path) as handle:
            CLASS_INDEX = json.load(handle)

    batch = ops.convert_to_numpy(preds)
    results = []
    for sample in batch:
        # Indices of the `top` largest scores, then sort the decoded entries
        # by score, descending.
        best_indices = sample.argsort()[-top:][::-1]
        decoded = [
            tuple(CLASS_INDEX[str(i)]) + (sample[i],) for i in best_indices
        ]
        decoded.sort(key=lambda entry: entry[2], reverse=True)
        results.append(decoded)
    return results
def _preprocess_numpy_input(x, data_format, mode):
    """Preprocesses a NumPy array encoding a batch of images.

    Args:
        x: Input array, 3D or 4D.
        data_format: Data format of the image array.
        mode: One of "caffe", "tf" or "torch".
            - caffe: will convert the images from RGB to BGR,
                then will zero-center each color channel with
                respect to the ImageNet dataset,
                without scaling.
            - tf: will scale pixels between -1 and 1,
                sample-wise.
            - torch: will scale pixels between 0 and 1 and then
                will normalize each channel with respect to the
                ImageNet dataset.

    Returns:
        Preprocessed Numpy array.
    """
    # Integer input is converted to the default float dtype. Float input is
    # mutated in place by the augmented assignments below (this write-over
    # behavior is documented in the public `preprocess_input` docstring).
    if not issubclass(x.dtype.type, np.floating):
        x = x.astype(backend.floatx(), copy=False)

    if mode == "tf":
        # Scale to [-1, 1], sample-wise; no channel statistics involved.
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        # Scale to [0, 1]; channel statistics applied below.
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        # "caffe": reverse the channel axis (RGB -> BGR). Note this creates a
        # reversed *view*, so the in-place ops below still write through to
        # the caller's array.
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if len(x.shape) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        # Per-channel means in BGR order; caffe mode does not scale.
        mean = [103.939, 116.779, 123.68]
        std = None

    # Zero-center by mean pixel (and divide by std in "torch" mode), with
    # the channel axis position depending on data format and rank.
    if data_format == "channels_first":
        if len(x.shape) == 3:
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            if std is not None:
                x[0, :, :] /= std[0]
                x[1, :, :] /= std[1]
                x[2, :, :] /= std[2]
        else:
            x[:, 0, :, :] -= mean[0]
            x[:, 1, :, :] -= mean[1]
            x[:, 2, :, :] -= mean[2]
            if std is not None:
                x[:, 0, :, :] /= std[0]
                x[:, 1, :, :] /= std[1]
                x[:, 2, :, :] /= std[2]
    else:
        x[..., 0] -= mean[0]
        x[..., 1] -= mean[1]
        x[..., 2] -= mean[2]
        if std is not None:
            x[..., 0] /= std[0]
            x[..., 1] /= std[1]
            x[..., 2] /= std[2]
    return x
def _preprocess_tensor_input(x, data_format, mode):
    """Preprocesses a tensor encoding a batch of images.

    Args:
        x: Input tensor, 3D or 4D.
        data_format: Data format of the image tensor.
        mode: One of "caffe", "tf" or "torch".
            - caffe: will convert the images from RGB to BGR,
                then will zero-center each color channel with
                respect to the ImageNet dataset,
                without scaling.
            - tf: will scale pixels between -1 and 1,
                sample-wise.
            - torch: will scale pixels between 0 and 1 and then
                will normalize each channel with respect to the
                ImageNet dataset.

    Returns:
        Preprocessed tensor.
    """
    # Capture rank before any channel reshuffling below.
    ndim = len(x.shape)
    if mode == "tf":
        # Scale to [-1, 1], sample-wise; no channel statistics involved.
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        # "caffe": reverse the channel axis (RGB -> BGR) via stacking, since
        # not all backends support negative-stride slicing on tensors.
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if len(x.shape) == 3:
                x = ops.stack([x[i, ...] for i in (2, 1, 0)], axis=0)
            else:
                x = ops.stack([x[:, i, :] for i in (2, 1, 0)], axis=1)
        else:
            # 'RGB'->'BGR'
            x = ops.stack([x[..., i] for i in (2, 1, 0)], axis=-1)
        mean = [103.939, 116.779, 123.68]
        std = None

    # The mean is negated up front so zero-centering can be written as a
    # broadcast addition (`x += mean_tensor`) below.
    mean_tensor = ops.convert_to_tensor(-np.array(mean), dtype=x.dtype)

    # Zero-center by mean pixel; reshape the mean so it broadcasts against
    # the channel axis for the given data format and rank.
    if data_format == "channels_first":
        if len(x.shape) == 3:
            mean_tensor = ops.reshape(mean_tensor, (3, 1, 1))
        else:
            mean_tensor = ops.reshape(mean_tensor, (1, 3) + (1,) * (ndim - 2))
    else:
        mean_tensor = ops.reshape(mean_tensor, (1,) * (ndim - 1) + (3,))
    x += mean_tensor
    if std is not None:
        std_tensor = ops.convert_to_tensor(np.array(std), dtype=x.dtype)
        if data_format == "channels_first":
            std_tensor = ops.reshape(std_tensor, (-1, 1, 1))
        x /= std_tensor
    return x
def obtain_input_shape(
    input_shape,
    default_size,
    min_size,
    data_format,
    require_flatten,
    weights=None,
):
    """Internal utility to compute/validate a model's input shape.

    Args:
        input_shape: Either None (will return the default network input
            shape), or a user-provided shape to be validated.
        default_size: Default input width/height for the model.
        min_size: Minimum input width/height accepted by the model.
        data_format: Image data format to use.
        require_flatten: Whether the model is expected to
            be linked to a classifier via a Flatten layer.
        weights: One of `None` (random initialization)
            or 'imagenet' (pre-training on ImageNet).
            If weights='imagenet' input channels must be equal to 3.

    Returns:
        An integer shape tuple (may include None entries).

    Raises:
        ValueError: In case of invalid argument values.
    """
    # Without imagenet weights, an unusual channel count is allowed (with a
    # warning) and folded into the default shape.
    if weights != "imagenet" and input_shape and len(input_shape) == 3:
        if data_format == "channels_first":
            # NOTE(review): the enclosing condition guarantees
            # len(input_shape) == 3, so this always selects axis 0 — the
            # `== 4` arm is dead code kept as-is here.
            correct_channel_axis = 1 if len(input_shape) == 4 else 0
            if input_shape[correct_channel_axis] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape "
                    f"with {input_shape[0]} input channels.",
                    stacklevel=2,
                )
            default_shape = (input_shape[0], default_size, default_size)
        else:
            if input_shape[-1] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape "
                    f"with {input_shape[-1]} input channels.",
                    stacklevel=2,
                )
            default_shape = (default_size, default_size, input_shape[-1])
    else:
        if data_format == "channels_first":
            default_shape = (3, default_size, default_size)
        else:
            default_shape = (default_size, default_size, 3)
    # With imagenet weights and a classification top, the input shape is
    # fixed: either omitted or exactly the default.
    if weights == "imagenet" and require_flatten:
        if input_shape is not None:
            if input_shape != default_shape:
                raise ValueError(
                    "When setting `include_top=True` "
                    "and loading `imagenet` weights, "
                    f"`input_shape` should be {default_shape}. "
                    f"Received: input_shape={input_shape}"
                )
        return default_shape
    # Otherwise validate the user shape (rank, channels, minimum spatial
    # size) or synthesize one when none was provided.
    if input_shape:
        if data_format == "channels_first":
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[0] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (
                    input_shape[1] is not None and input_shape[1] < min_size
                ) or (input_shape[2] is not None and input_shape[2] < min_size):
                    raise ValueError(
                        f"Input size must be at least {min_size}"
                        f"x{min_size}; Received: "
                        f"input_shape={input_shape}"
                    )
        else:
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[-1] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (
                    input_shape[0] is not None and input_shape[0] < min_size
                ) or (input_shape[1] is not None and input_shape[1] < min_size):
                    raise ValueError(
                        "Input size must be at least "
                        f"{min_size}x{min_size}; Received: "
                        f"input_shape={input_shape}"
                    )
    else:
        if require_flatten:
            input_shape = default_shape
        else:
            # Fully-convolutional use: spatial dimensions stay dynamic.
            if data_format == "channels_first":
                input_shape = (3, None, None)
            else:
                input_shape = (None, None, 3)
    # A classifier top needs a fully static shape for the Flatten/Dense.
    if require_flatten:
        if None in input_shape:
            raise ValueError(
                "If `include_top` is True, "
                "you should specify a static `input_shape`. "
                f"Received: input_shape={input_shape}"
            )
    return input_shape
def correct_pad(inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.

    Args:
        inputs: Input tensor.
        kernel_size: An integer or tuple/list of 2 integers.

    Returns:
        A tuple of two `(pad_before, pad_after)` pairs, one per spatial
        dimension.
    """
    # Locate the two spatial axes for the current data format.
    spatial_start = 2 if backend.image_data_format() == "channels_first" else 1
    input_size = inputs.shape[spatial_start : spatial_start + 2]

    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    # Even-sized inputs need one less pixel of leading padding so the
    # strided convolution output size comes out right.
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)

    half_h = kernel_size[0] // 2
    half_w = kernel_size[1] // 2
    return (
        (half_h - adjust[0], half_h),
        (half_w - adjust[1], half_w),
    )
def validate_activation(classifier_activation, weights):
    """Validates that `classifier_activation` is compatible with the weights.

    Args:
        classifier_activation: str or callable activation function.
        weights: The pretrained weights to load.

    Raises:
        ValueError: if an activation other than `None` or `softmax` is used
            with pretrained weights.
    """
    # Randomly-initialized models may use any classifier activation.
    if weights is None:
        return

    # Compare resolved activation objects so aliases of the same activation
    # are treated identically.
    resolved = activations.get(classifier_activation)
    allowed = (activations.get("softmax"), activations.get(None))
    if resolved not in allowed:
        raise ValueError(
            "Only `None` and `softmax` activations are allowed "
            "for the `classifier_activation` argument when using "
            "pretrained weights, with `include_top=True`; Received: "
            f"classifier_activation={resolved}"
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/__init__.py | keras/src/applications/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/inception_resnet_v2.py | keras/src/applications/inception_resnet_v2.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.layers.layer import Layer
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Root URL for the released Inception-ResNet-V2 weight files downloaded in
# `InceptionResNetV2` below.
BASE_WEIGHT_URL = (
    "https://storage.googleapis.com/tensorflow/"
    "keras-applications/inception_resnet_v2/"
)
@keras_export(
    [
        "keras.applications.inception_resnet_v2.InceptionResNetV2",
        "keras.applications.InceptionResNetV2",
    ]
)
def InceptionResNetV2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="inception_resnet_v2",
):
    """Instantiates the Inception-ResNet v2 architecture.

    Reference:
    - [Inception-v4, Inception-ResNet and the Impact of
       Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
      (AAAI 2017)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of
    input preprocessing. For InceptionResNetV2, call
    `keras.applications.inception_resnet_v2.preprocess_input`
    on your inputs before passing them to the model.
    `inception_resnet_v2.preprocess_input`
    will scale input pixels between -1 and 1.

    Args:
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)`
            (with `'channels_last'` data format)
            or `(3, 299, 299)` (with `'channels_first'` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 75.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `'avg'` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `'max'` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`,
            and if no `weights` argument is specified.
        classifier_activation: A `str` or callable.
            The activation function to use on the "top" layer.
            Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits
            of the "top" layer. When loading pretrained weights,
            `classifier_activation` can only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A model instance.
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top=True`, '
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # NOTE(review): the pretrained weight files are matched to layers by
    # construction order/name — presumably reordering anything below would
    # break `load_weights`; keep the layer order exactly as-is.

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding="valid")
    x = conv2d_bn(x, 32, 3, padding="valid")
    x = conv2d_bn(x, 64, 3)
    x = layers.MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding="valid")
    x = conv2d_bn(x, 192, 3, padding="valid")
    x = layers.MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = layers.AveragePooling2D(3, strides=1, padding="same")(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if backend.image_data_format() == "channels_first" else 3
    x = layers.Concatenate(axis=channel_axis, name="mixed_5b")(branches)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(
            x, scale=0.17, block_type="block35", block_idx=block_idx
        )

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding="valid")
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding="valid")
    branch_pool = layers.MaxPooling2D(3, strides=2, padding="valid")(x)
    branches = [branch_0, branch_1, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name="mixed_6a")(branches)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(
            x, scale=0.1, block_type="block17", block_idx=block_idx
        )

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding="valid")
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding="valid")
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding="valid")
    branch_pool = layers.MaxPooling2D(3, strides=2, padding="valid")(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name="mixed_7a")(branches)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(
            x, scale=0.2, block_type="block8", block_idx=block_idx
        )
    # Final block8 uses scale=1.0 and no activation.
    x = inception_resnet_block(
        x, scale=1.0, activation=None, block_type="block8", block_idx=10
    )

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name="conv_7b")

    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if include_top:
            fname = "inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5"
            weights_path = file_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir="models",
                file_hash="e693bd0210a403b3192acc6073ad2e96",
            )
        else:
            fname = (
                "inception_resnet_v2_weights_"
                "tf_dim_ordering_tf_kernels_notop.h5"
            )
            weights_path = file_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir="models",
                file_hash="d19885ff4a710c122648d3b5c3b684e4",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
def conv2d_bn(
    x,
    filters,
    kernel_size,
    strides=1,
    padding="same",
    activation="relu",
    use_bias=False,
    name=None,
):
    """Applies a `Conv2D` followed by optional batch norm and activation.

    Args:
        x: input tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        strides: strides in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation in `Conv2D`.
        use_bias: whether to use a bias in `Conv2D`.
        name: base name for the ops; the batch norm layer is named
            `name + '_bn'` and the activation `name + '_ac'`.

    Returns:
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    out = layers.Conv2D(
        filters,
        kernel_size,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        name=name,
    )(x)
    if not use_bias:
        # Bias-free convs are followed by batch norm; the channel axis
        # depends on the image data format.
        if backend.image_data_format() == "channels_first":
            bn_axis = 1
        else:
            bn_axis = 3
        bn_name = f"{name}_bn" if name is not None else None
        out = layers.BatchNormalization(
            axis=bn_axis, scale=False, name=bn_name
        )(out)
    if activation is not None:
        ac_name = f"{name}_ac" if name is not None else None
        out = layers.Activation(activation, name=ac_name)(out)
    return out
class CustomScaleLayer(Layer):
    """Computes `inputs[0] + inputs[1] * scale` (scaled residual add)."""

    def __init__(self, scale, **kwargs):
        super().__init__(**kwargs)
        self.scale = scale

    def get_config(self):
        # Merge the scale into the base layer config for serialization.
        base_config = super().get_config()
        return {**base_config, "scale": self.scale}

    def call(self, inputs):
        return inputs[0] + inputs[1] * self.scale
def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"):
    """Adds an Inception-ResNet block.

    Args:
        x: input tensor.
        scale: scaling factor to scale the residuals
            (i.e., the output of passing `x` through an inception module)
            before adding them to the shortcut
            branch. Let `r` be the output from the residual branch,
            the output of this block will be `x + scale * r`.
        block_type: `'block35'`, `'block17'` or `'block8'`,
            determines the network structure in the residual branch.
        block_idx: an `int` used for generating layer names.
            The Inception-ResNet blocks are repeated many times
            in this network. We use `block_idx` to identify each
            of the repetitions. For example, the first
            Inception-ResNet-A block will have
            `block_type='block35', block_idx=0`, and the layer names
            will have a common prefix `'block35_0'`.
        activation: activation function to use at the end of the block.

    Returns:
        Output tensor for the block.
    """
    # Build the parallel residual branches for the requested block type.
    # NOTE(review): filter counts and kernel shapes must stay exactly as-is
    # to remain compatible with the released pretrained weights.
    if block_type == "block35":
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif block_type == "block17":
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif block_type == "block8":
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError(
            "Unknown Inception-ResNet block type. "
            'Expects "block35", "block17" or "block8", '
            f"but got: {block_type}"
        )

    block_name = f"{block_type}_{block_idx}"
    channel_axis = 1 if backend.image_data_format() == "channels_first" else 3
    mixed = layers.Concatenate(axis=channel_axis, name=f"{block_name}_mixed")(
        branches
    )
    # 1x1 conv (no activation, with bias) restores the channel count of `x`
    # so the scaled residual can be added to the shortcut.
    up = conv2d_bn(
        mixed,
        x.shape[channel_axis],
        1,
        activation=None,
        use_bias=True,
        name=f"{block_name}_conv",
    )

    x = CustomScaleLayer(scale)([x, up])
    if activation is not None:
        x = layers.Activation(activation, name=f"{block_name}_ac")(x)
    return x
@keras_export("keras.applications.inception_resnet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
    # "tf" mode scales input pixels to the [-1, 1] range, sample-wise.
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )


@keras_export("keras.applications.inception_resnet_v2.decode_predictions")
def decode_predictions(preds, top=5):
    # Thin wrapper: delegates to the shared ImageNet decoding utility.
    return imagenet_utils.decode_predictions(preds, top=top)


# Fill in the shared docstring templates for this module's wrappers.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/imagenet_utils_test.py | keras/src/applications/imagenet_utils_test.py | import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import testing
from keras.src.applications import imagenet_utils as utils
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
class TestImageNetUtils(testing.TestCase):
    """Unit tests for the `imagenet_utils` preprocessing helpers."""
    def test_preprocess_input(self):
        """Eager `preprocess_input`: bad mode, shape preservation, and
        channels_last/channels_first equivalence for float and int input."""
        # Test invalid mode check
        x = np.random.uniform(0, 255, (10, 10, 3))
        with self.assertRaises(ValueError):
            utils.preprocess_input(x, mode="some_unknown_mode")
        # Test image batch with float and int image input
        x = np.random.uniform(0, 255, (2, 10, 10, 3))
        xint = x.astype("int32")
        self.assertEqual(utils.preprocess_input(x).shape, x.shape)
        self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
        out1 = utils.preprocess_input(x, "channels_last")
        out1int = utils.preprocess_input(xint, "channels_last")
        out2 = utils.preprocess_input(
            np.transpose(x, (0, 3, 1, 2)), "channels_first"
        )
        out2int = utils.preprocess_input(
            np.transpose(xint, (0, 3, 1, 2)), "channels_first"
        )
        self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
        self.assertAllClose(out1int, out2int.transpose(0, 2, 3, 1))
        # Test single image
        x = np.random.uniform(0, 255, (10, 10, 3))
        xint = x.astype("int32")
        self.assertEqual(utils.preprocess_input(x).shape, x.shape)
        self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
        out1 = utils.preprocess_input(x, "channels_last")
        out1int = utils.preprocess_input(xint, "channels_last")
        out2 = utils.preprocess_input(
            np.transpose(x, (2, 0, 1)), "channels_first"
        )
        out2int = utils.preprocess_input(
            np.transpose(xint, (2, 0, 1)), "channels_first"
        )
        self.assertAllClose(out1, out2.transpose(1, 2, 0))
        self.assertAllClose(out1int, out2int.transpose(1, 2, 0))
        # Test that writing over the input data works predictably
        for mode in ["torch", "tf"]:
            x = np.random.uniform(0, 255, (2, 10, 10, 3))
            xint = x.astype("int")
            x2 = utils.preprocess_input(x, "channels_last", mode=mode)
            # NOTE(review): `mode` is not forwarded here, so the int path
            # always uses the default mode — verify this is intentional.
            xint2 = utils.preprocess_input(xint, "channels_last")
            self.assertAllClose(x, x2)
            self.assertNotEqual(xint.astype("float").max(), xint2.max())
        # Caffe mode works differently from the others
        x = np.random.uniform(0, 255, (2, 10, 10, 3))
        xint = x.astype("int")
        x2 = utils.preprocess_input(
            x, data_format="channels_last", mode="caffe"
        )
        xint2 = utils.preprocess_input(xint, data_format="channels_last")
        self.assertAllClose(x, x2[..., ::-1])
        self.assertNotEqual(xint.astype("float").max(), xint2.max())
    @parameterized.named_parameters(
        [
            {"testcase_name": "mode_torch", "mode": "torch"},
            {"testcase_name": "mode_tf", "mode": "tf"},
            {"testcase_name": "mode_caffe", "mode": "caffe"},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_preprocess_input_symbolic(self, mode):
        """`preprocess_input` inside a Lambda layer gives matching results
        across data formats, for batches and single images."""
        backend_data_format = backend.image_data_format()
        # Test image batch
        if backend_data_format == "channels_last":
            x = np.random.uniform(0, 255, (2, 10, 10, 3))
        elif backend_data_format == "channels_first":
            x = np.random.uniform(0, 255, (2, 3, 10, 10))
        inputs = keras.layers.Input(shape=x.shape[1:])
        outputs = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, mode=mode),
            output_shape=x.shape[1:],
        )(inputs)
        model = keras.Model(inputs, outputs)
        self.assertEqual(model.predict(x).shape, x.shape)
        x = np.random.uniform(0, 255, (2, 10, 10, 3))
        inputs = keras.layers.Input(shape=x.shape[1:])
        outputs1 = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, "channels_last", mode=mode),
            output_shape=x.shape[1:],
        )(inputs)
        model1 = keras.Model(inputs, outputs1)
        out1 = model1.predict(x)
        x2 = np.transpose(x, (0, 3, 1, 2))
        inputs2 = keras.layers.Input(shape=x2.shape[1:])
        outputs2 = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, "channels_first", mode=mode),
            output_shape=x2.shape[1:],
        )(inputs2)
        model2 = keras.Model(inputs2, outputs2)
        out2 = model2.predict(x2)
        self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
        # Test single image
        if backend_data_format == "channels_last":
            x = np.random.uniform(0, 255, (10, 10, 3))
        elif backend_data_format == "channels_first":
            x = np.random.uniform(0, 255, (3, 10, 10))
        inputs = keras.layers.Input(shape=x.shape)
        outputs = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, mode=mode), output_shape=x.shape
        )(inputs)
        model = keras.Model(inputs, outputs)
        self.assertEqual(model.predict(x[np.newaxis])[0].shape, x.shape)
        x = np.random.uniform(0, 255, (10, 10, 3))
        inputs = keras.layers.Input(shape=x.shape)
        outputs1 = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, "channels_last", mode=mode),
            output_shape=x.shape,
        )(inputs)
        model1 = keras.Model(inputs, outputs1)
        out1 = model1.predict(x[np.newaxis])[0]
        x2 = np.transpose(x, (2, 0, 1))
        inputs2 = keras.layers.Input(shape=x2.shape)
        outputs2 = keras.layers.Lambda(
            lambda x: utils.preprocess_input(x, "channels_first", mode=mode),
            output_shape=x2.shape,
        )(inputs2)
        model2 = keras.Model(inputs2, outputs2)
        out2 = model2.predict(x2[np.newaxis])[0]
        self.assertAllClose(out1, out2.transpose(1, 2, 0))
    @parameterized.named_parameters(
        [
            {"testcase_name": "mode_torch", "mode": "torch"},
            {"testcase_name": "mode_tf", "mode": "tf"},
            {"testcase_name": "mode_caffe", "mode": "caffe"},
        ]
    )
    def test_preprocess_input_symbolic_mixed_precision(self, mode):
        """`preprocess_input` builds under the mixed_float16 dtype policy."""
        set_dtype_policy("mixed_float16")
        shape = (20, 20, 3)
        inputs = keras.layers.Input(shape=shape)
        # The finally block restores the default policy for other tests.
        try:
            keras.layers.Lambda(
                lambda x: utils.preprocess_input(x, mode=mode),
                output_shape=shape,
            )(inputs)
        finally:
            set_dtype_policy("float32")
    @parameterized.named_parameters(
        [
            {
                "testcase_name": "channels_last_format",
                "data_format": "channels_last",
            },
            {
                "testcase_name": "channels_first_format",
                "data_format": "channels_first",
            },
        ]
    )
    def test_obtain_input_shape(self, data_format):
        """`obtain_input_shape`: validation errors and resolved shapes."""
        # input_shape and default_size are not identical.
        with self.assertRaises(ValueError):
            utils.obtain_input_shape(
                input_shape=(224, 224, 3),
                default_size=299,
                min_size=139,
                data_format="channels_last",
                require_flatten=True,
                weights="imagenet",
            )
        # Test invalid use cases
        shape = (139, 139)
        # NOTE(review): this 99-channel input_shape is overwritten below
        # without being asserted — looks like a lost test case; confirm.
        if data_format == "channels_last":
            input_shape = shape + (99,)
        else:
            input_shape = (99,) + shape
        # input_shape is smaller than min_size.
        shape = (100, 100)
        if data_format == "channels_last":
            input_shape = shape + (3,)
        else:
            input_shape = (3,) + shape
        with self.assertRaises(ValueError):
            utils.obtain_input_shape(
                input_shape=input_shape,
                default_size=None,
                min_size=139,
                data_format=data_format,
                require_flatten=False,
            )
        # shape is 1D.
        shape = (100,)
        if data_format == "channels_last":
            input_shape = shape + (3,)
        else:
            input_shape = (3,) + shape
        with self.assertRaises(ValueError):
            utils.obtain_input_shape(
                input_shape=input_shape,
                default_size=None,
                min_size=139,
                data_format=data_format,
                require_flatten=False,
            )
        # the number of channels is 5 not 3.
        shape = (100, 100)
        if data_format == "channels_last":
            input_shape = shape + (5,)
        else:
            input_shape = (5,) + shape
        with self.assertRaises(ValueError):
            utils.obtain_input_shape(
                input_shape=input_shape,
                default_size=None,
                min_size=139,
                data_format=data_format,
                require_flatten=False,
            )
        # require_flatten=True with dynamic input shape.
        with self.assertRaises(ValueError):
            utils.obtain_input_shape(
                input_shape=None,
                default_size=None,
                min_size=139,
                data_format="channels_first",
                require_flatten=True,
            )
        # test include top
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=(3, 200, 200),
                default_size=None,
                min_size=139,
                data_format="channels_first",
                require_flatten=True,
            ),
            (3, 200, 200),
        )
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=None,
                default_size=None,
                min_size=139,
                data_format="channels_last",
                require_flatten=False,
            ),
            (None, None, 3),
        )
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=None,
                default_size=None,
                min_size=139,
                data_format="channels_first",
                require_flatten=False,
            ),
            (3, None, None),
        )
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=None,
                default_size=None,
                min_size=139,
                data_format="channels_last",
                require_flatten=False,
            ),
            (None, None, 3),
        )
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=(150, 150, 3),
                default_size=None,
                min_size=139,
                data_format="channels_last",
                require_flatten=False,
            ),
            (150, 150, 3),
        )
        self.assertEqual(
            utils.obtain_input_shape(
                input_shape=(3, None, None),
                default_size=None,
                min_size=139,
                data_format="channels_first",
                require_flatten=False,
            ),
            (3, None, None),
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/vgg16.py | keras/src/applications/vgg16.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Pretrained ImageNet weights hosted on the Keras Applications bucket.
WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
)
# Same weights without the fully-connected classifier ("no top") layers.
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/"
    "keras-applications/vgg16/"
    "vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_export(["keras.applications.vgg16.VGG16", "keras.applications.VGG16"])
def VGG16(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="vgg16",
):
    """Instantiates the VGG16 model.
    Reference:
    - [Very Deep Convolutional Networks for Large-Scale Image Recognition](
    https://arxiv.org/abs/1409.1556) (ICLR 2015)
    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).
    The default input size for this model is 224x224.
    Note: each Keras Application expects a specific kind of input preprocessing.
    For VGG16, call `keras.applications.vgg16.preprocess_input` on your
    inputs before passing them to the model.
    `vgg16.preprocess_input` will convert the input images from RGB to BGR,
    then will zero-center each color channel with respect to the ImageNet
    dataset, without scaling.
    Args:
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(224, 224, 3)`
            (with `channels_last` data format) or
            `(3, 224, 224)` (with `"channels_first"` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: The name of the model (string).
    Returns:
        A `Model` instance.
    """
    # Validate `weights` before doing any construction work.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), 'imagenet' "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. Received: "
            f"weights={weights}"
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights='imagenet'` with `include_top=True`, "
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    # Build the input tensor unless the caller supplied one.
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Five blocks of 3x3 convolutions, each followed by 2x2 max-pooling.
    # Layer names must stay exactly as-is: pretrained weights load by name.
    # Block 1
    x = layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="block1_conv1"
    )(img_input)
    x = layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="block1_conv2"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block1_pool")(x)
    # Block 2
    x = layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="block2_conv1"
    )(x)
    x = layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="block2_conv2"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block2_pool")(x)
    # Block 3
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv1"
    )(x)
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv2"
    )(x)
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv3"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block3_pool")(x)
    # Block 4
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv1"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv2"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv3"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block4_pool")(x)
    # Block 5
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block5_conv1"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block5_conv2"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block5_conv3"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block5_pool")(x)
    if include_top:
        # Classification block
        x = layers.Flatten(name="flatten")(x)
        x = layers.Dense(4096, activation="relu", name="fc1")(x)
        x = layers.Dense(4096, activation="relu", name="fc2")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Functional(inputs, x, name=name)
    # Load weights. The file choice depends on whether the classifier
    # ("top") layers are part of the model.
    if weights == "imagenet":
        if include_top:
            weights_path = file_utils.get_file(
                "vgg16_weights_tf_dim_ordering_tf_kernels.h5",
                WEIGHTS_PATH,
                cache_subdir="models",
                file_hash="64373286793e3c8b2b4e3219cbf3544b",
            )
        else:
            weights_path = file_utils.get_file(
                "vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",
                WEIGHTS_PATH_NO_TOP,
                cache_subdir="models",
                file_hash="6d6bbae143d832006294945121d1f1fc",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
@keras_export("keras.applications.vgg16.preprocess_input")
def preprocess_input(x, data_format=None):
    """Apply `imagenet_utils.preprocess_input` with `mode="caffe"`."""
    preprocessed = imagenet_utils.preprocess_input(
        x, mode="caffe", data_format=data_format
    )
    return preprocessed
@keras_export("keras.applications.vgg16.decode_predictions")
def decode_predictions(preds, top=5):
    """Forward to the shared `imagenet_utils.decode_predictions` helper."""
    results = imagenet_utils.decode_predictions(preds, top=top)
    return results
# Attach the shared Keras Applications docstrings to the wrappers above.
# The "caffe" return-doc variant matches the mode="caffe" preprocessing here.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/applications/densenet.py | keras/src/applications/densenet.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils
# Released DenseNet weight files; the `_NO_TOP` variants omit the classifier.
BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
)
DENSENET121_WEIGHT_PATH = (
    f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET121_WEIGHT_PATH_NO_TOP = (
    f"{BASE_WEIGHTS_PATH}"
    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET169_WEIGHT_PATH = (
    f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET169_WEIGHT_PATH_NO_TOP = (
    f"{BASE_WEIGHTS_PATH}"
    "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET201_WEIGHT_PATH = (
    f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET201_WEIGHT_PATH_NO_TOP = (
    f"{BASE_WEIGHTS_PATH}"
    "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
def dense_block(x, blocks, name):
    """Stack `blocks` conv blocks; each concatenates its output onto `x`.

    Args:
        x: input tensor.
        blocks: integer, the number of building blocks.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    for block_idx in range(1, blocks + 1):
        x = conv_block(x, 32, name=f"{name}_block{block_idx}")
    return x
def transition_block(x, reduction, name):
    """Compress channels with a 1x1 conv, then downsample by average pooling.

    Args:
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
    # Channel count after compression, computed from the incoming tensor.
    filters = int(x.shape[channel_axis] * reduction)
    y = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=f"{name}_bn"
    )(x)
    y = layers.Activation("relu", name=f"{name}_relu")(y)
    y = layers.Conv2D(filters, 1, use_bias=False, name=f"{name}_conv")(y)
    return layers.AveragePooling2D(2, strides=2, name=f"{name}_pool")(y)
def conv_block(x, growth_rate, name):
    """BN-ReLU 1x1 bottleneck + BN-ReLU 3x3 conv, concatenated onto `x`.

    Args:
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
    y = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=f"{name}_0_bn"
    )(x)
    y = layers.Activation("relu", name=f"{name}_0_relu")(y)
    y = layers.Conv2D(
        4 * growth_rate, 1, use_bias=False, name=f"{name}_1_conv"
    )(y)
    y = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
    )(y)
    y = layers.Activation("relu", name=f"{name}_1_relu")(y)
    y = layers.Conv2D(
        growth_rate, 3, padding="same", use_bias=False, name=f"{name}_2_conv"
    )(y)
    return layers.Concatenate(axis=channel_axis, name=f"{name}_concat")([x, y])
def DenseNet(
    blocks,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="densenet",
):
    """Instantiates the DenseNet architecture.
    Reference:
    - [Densely Connected Convolutional Networks](
        https://arxiv.org/abs/1608.06993) (CVPR 2017)
    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.
    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).
    Note: each Keras Application expects a specific kind of input preprocessing.
    For DenseNet, call `keras.applications.densenet.preprocess_input`
    on your inputs before passing them to the model.
    `densenet.preprocess_input` will scale pixels between 0 and 1 and then
    will normalize each channel with respect to the ImageNet
    dataset statistics.
    Args:
        blocks: numbers of building blocks for the four dense layers.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)`
            (with `'channels_last'` data format)
            or `(3, 224, 224)` (with `'channels_first'` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified. Defaults to `1000`.
        classifier_activation: A `str` or callable.
            The activation function to use
            on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: The name of the model (string).
    Returns:
        A model instance.
    """
    if backend.image_data_format() == "channels_first":
        raise ValueError(
            "DenseNet does not support the `channels_first` image data "
            "format. Switch to `channels_last` by editing your local "
            "config file at ~/.keras/keras.json"
        )
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            " as true, `classes` should be 1000"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    # Build the input tensor unless the caller supplied one.
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    # Stem: padded 7x7/2 conv followed by padded 3x3/2 max-pool.
    # Layer names must stay exactly as-is: pretrained weights load by name.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name="conv1_conv")(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name="conv1_bn"
    )(x)
    x = layers.Activation("relu", name="conv1_relu")(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name="pool1")(x)
    # Four dense blocks separated by 0.5-compression transition blocks.
    x = dense_block(x, blocks[0], name="conv2")
    x = transition_block(x, 0.5, name="pool2")
    x = dense_block(x, blocks[1], name="conv3")
    x = transition_block(x, 0.5, name="pool3")
    x = dense_block(x, blocks[2], name="conv4")
    x = transition_block(x, 0.5, name="pool4")
    x = dense_block(x, blocks[3], name="conv5")
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name="bn")(x)
    x = layers.Activation("relu", name="relu")(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Functional(inputs, x, name=name)
    # Load weights. Pretrained files exist only for the exact block
    # configurations of DenseNet-121/169/201.
    if weights == "imagenet":
        if include_top:
            if blocks == [6, 12, 24, 16]:
                weights_path = file_utils.get_file(
                    "densenet121_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET121_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="9d60b8095a5708f2dcce2bca79d332c7",
                )
            elif blocks == [6, 12, 32, 32]:
                weights_path = file_utils.get_file(
                    "densenet169_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET169_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="d699b8f76981ab1b30698df4c175e90b",
                )
            elif blocks == [6, 12, 48, 32]:
                weights_path = file_utils.get_file(
                    "densenet201_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET201_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
                )
            else:
                raise ValueError("weights_path undefined")
        else:
            if blocks == [6, 12, 24, 16]:
                weights_path = file_utils.get_file(
                    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET121_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="30ee3e1110167f948a6b9946edeeb738",
                )
            elif blocks == [6, 12, 32, 32]:
                weights_path = file_utils.get_file(
                    "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET169_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="b8c4d4c20dd625c148057b9ff1c1176b",
                )
            elif blocks == [6, 12, 48, 32]:
                weights_path = file_utils.get_file(
                    "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET201_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
                )
            else:
                raise ValueError("weights_path undefined")
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
@keras_export(
    [
        "keras.applications.densenet.DenseNet121",
        "keras.applications.DenseNet121",
    ]
)
def DenseNet121(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="densenet121",
):
    """Instantiates the Densenet121 architecture."""
    return DenseNet(
        blocks=[6, 12, 24, 16],
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.densenet.DenseNet169",
        "keras.applications.DenseNet169",
    ]
)
def DenseNet169(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="densenet169",
):
    """Instantiates the Densenet169 architecture."""
    return DenseNet(
        blocks=[6, 12, 32, 32],
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export(
    [
        "keras.applications.densenet.DenseNet201",
        "keras.applications.DenseNet201",
    ]
)
def DenseNet201(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="densenet201",
):
    """Instantiates the Densenet201 architecture."""
    return DenseNet(
        blocks=[6, 12, 48, 32],
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        classifier_activation=classifier_activation,
        name=name,
    )
@keras_export("keras.applications.densenet.preprocess_input")
def preprocess_input(x, data_format=None):
    """Apply `imagenet_utils.preprocess_input` with `mode="torch"`."""
    preprocessed = imagenet_utils.preprocess_input(
        x, mode="torch", data_format=data_format
    )
    return preprocessed
@keras_export("keras.applications.densenet.decode_predictions")
def decode_predictions(preds, top=5):
    """Forward to the shared `imagenet_utils.decode_predictions` helper."""
    results = imagenet_utils.decode_predictions(preds, top=top)
    return results
# Attach the shared Keras Applications docstrings to the wrappers above.
# The "torch" return-doc variant matches the mode="torch" preprocessing here.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For DenseNet, call `keras.applications.densenet.preprocess_input`
on your inputs before passing them to the model.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified. Defaults to 1000.
classifier_activation: A `str` or callable.
The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits
of the "top" layer. When loading pretrained weights,
`classifier_activation` can only be `None` or `"softmax"`.
name: The name of the model (string).
Returns:
A Keras model instance.
"""
setattr(DenseNet121, "__doc__", DenseNet121.__doc__ + DOC)
setattr(DenseNet169, "__doc__", DenseNet169.__doc__ + DOC)
setattr(DenseNet201, "__doc__", DenseNet201.__doc__ + DOC)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/activations/activations_test.py | keras/src/activations/activations_test.py | import numpy as np
from keras.src import activations
from keras.src import backend
from keras.src import testing
def _ref_softmax(values):
    """Numerically stable reference softmax over all elements of `values`."""
    shifted = values - np.max(values)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def _ref_softplus(x):
    """Reference softplus: log(1 + exp(x)), elementwise."""
    return np.log(np.exp(x) + np.ones_like(x))
def _ref_log_softmax(values):
    """Numerically stable reference log-softmax over all elements."""
    shifted = values - np.max(values)  # shift for numerical stability
    return shifted - np.log(np.sum(np.exp(shifted)))
def _ref_leaky_relu(x, alpha=0.2):
    """Scalar leaky ReLU: identity for positive x, slope `alpha` otherwise."""
    if x > 0:
        return x
    return alpha * x
def _ref_relu6(x):
    """Scalar ReLU clipped at 6: min(max(0, x), 6)."""
    clipped = max(0, x)
    return clipped if clipped < 6 else 6
def _ref_silu(x):
    """Reference SiLU/swish: x * sigmoid(x)."""
    denom = 1 + np.exp(-x)
    return x / denom
def _ref_hard_sigmoid(x):
    """Scalar hard sigmoid: x / 6 + 0.5, clipped to [0, 1]."""
    shifted = (x / 6.0) + 0.5
    if shifted <= 0:
        return 0.0
    if shifted >= 1:
        return 1.0
    return shifted
def _ref_sparse_sigmoid(x):
    """Reference sparse sigmoid: 0 for x <= -1, 1 for x >= 1, linear between."""
    linear_part = 0.5 * (x + 1)
    upper_clipped = np.where(x >= 1, 1, linear_part)
    return np.where(x <= -1, 0, upper_clipped)
def _ref_log_sigmoid(x):
    """Reference log-sigmoid: -softplus(-x), with softplus written inline."""
    negated = -x
    return -1 * np.log(np.ones_like(negated) + np.exp(negated))
def _ref_hard_silu(x):
    """Reference hard SiLU: x * relu6(x + 3) / 6."""
    gate = np.minimum(np.maximum(0.0, x + 3.0), 6.0)
    return x * gate * (1.0 / 6.0)
def _ref_sigmoid(x):
    """Numerically stable scalar sigmoid (branches on the sign of x)."""
    if x < 0:
        e = np.exp(x)
        return e / (1 + e)
    return 1 / (1 + np.exp(-x))
def _ref_softsign(x):
    """Reference softsign: x / (1 + |x|), elementwise."""
    denom = np.ones_like(x) + np.abs(x)
    return np.divide(x, denom)
class ActivationsTest(testing.TestCase):
def test_softmax(self):
x = np.random.random((2, 5))
result = activations.softmax(x[np.newaxis, :])[0]
expected = _ref_softmax(x[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
def test_softmax_2d_axis_0(self):
x = np.random.random((2, 5))
result = activations.softmax(x[np.newaxis, :], axis=1)[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_softmax(x[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_3d_axis_tuple(self):
x = np.random.random((2, 3, 5))
result = activations.softmax(x, axis=(1, 2))
expected = np.zeros((2, 3, 5))
for i in range(2):
expected[i, :, :] = _ref_softmax(x[i, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_1d(self):
x = np.random.random(5)
result = activations.softmax(x)
expected = _ref_softmax(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_higher_dim(self):
x = np.random.random((2, 3, 4, 5))
result = activations.softmax(x, axis=(2, 3))
expected = np.zeros((2, 3, 4, 5))
for i in range(2):
for j in range(3):
expected[i, j, :, :] = _ref_softmax(x[i, j, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_higher_dim_multiple_axes(self):
x = np.random.random((2, 3, 4, 5, 6))
result = activations.softmax(x, axis=(2, 3, 4))
expected = np.zeros((2, 3, 4, 5, 6))
for i in range(2):
for j in range(3):
expected[i, j, :, :, :] = _ref_softmax(x[i, j, :, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_negative_axis(self):
x = np.random.random((2, 5))
result = activations.softmax(x, axis=-1)
expected = np.zeros((2, 5))
for i in range(2):
expected[i, :] = _ref_softmax(x[i, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_softmax(self):
x = np.random.random((2, 2, 3)) * 10
result = activations.softmax(x[np.newaxis, :])[0]
expected = _ref_softmax(x[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_log_softmax_2d_axis_0(self):
x = np.random.random((2, 5))
result = activations.log_softmax(x[np.newaxis, :], axis=1)[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_log_softmax(x[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_3d_axis_tuple(self):
x = np.random.random((2, 3, 5))
result = activations.log_softmax(x, axis=(1, 2))
expected = np.zeros((2, 3, 5))
for i in range(2):
expected[i, :, :] = _ref_log_softmax(x[i, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_1d(self):
x = np.random.random(5)
result = activations.log_softmax(x)
expected = _ref_log_softmax(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_higher_dim(self):
x = np.random.random((2, 3, 4, 5))
result = activations.log_softmax(x, axis=(2, 3))
expected = np.zeros((2, 3, 4, 5))
for i in range(2):
for j in range(3):
expected[i, j, :, :] = _ref_log_softmax(x[i, j, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_higher_dim_multiple_axes(self):
x = np.random.random((2, 3, 4, 5, 6))
result = activations.log_softmax(x, axis=(2, 3, 4))
expected = np.zeros((2, 3, 4, 5, 6))
for i in range(2):
for j in range(3):
expected[i, j, :, :, :] = _ref_log_softmax(x[i, j, :, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_negative_axis(self):
x = np.random.random((2, 5))
result = activations.log_softmax(x, axis=-1)
expected = np.zeros((2, 5))
for i in range(2):
expected[i, :] = _ref_log_softmax(x[i, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_log_softmax(self):
x = np.random.random((2, 2, 3)) * 10
result = activations.log_softmax(x[np.newaxis, :])[0]
expected = _ref_log_softmax(x[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    # Positive inputs pass through scaled by `scale`.
    pos = np.array([[1, 2]], dtype=backend.floatx())
    self.assertAllClose(
        activations.selu(pos[np.newaxis, :])[0], pos * scale, rtol=1e-05
    )
    # Negative inputs follow scale * alpha * (exp(x) - 1).
    neg = np.array([[-1, -2]], dtype=backend.floatx())
    self.assertAllClose(
        activations.selu(neg[np.newaxis, :])[0],
        (np.exp(neg) - 1) * scale * alpha,
    )
def test_softplus(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.softplus(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_softplus)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.softplus(x_1d)
expected_1d = np.vectorize(_ref_softplus)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.softplus(x_3d)
expected_3d = np.vectorize(_ref_softplus)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.softplus(x_zero)
expected_zero = np.vectorize(_ref_softplus)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.softplus(x_large_positive)
expected_large_positive = np.vectorize(_ref_softplus)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.softplus(x_large_negative)
expected_large_negative = np.vectorize(_ref_softplus)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_softsign(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.softsign(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_softsign)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.softsign(x_1d)
expected_1d = np.vectorize(_ref_softsign)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.softsign(x_3d)
expected_3d = np.vectorize(_ref_softsign)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.softsign(x_zero)
expected_zero = np.vectorize(_ref_softsign)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.softsign(x_large_positive)
expected_large_positive = np.vectorize(_ref_softsign)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.softsign(x_large_negative)
expected_large_negative = np.vectorize(_ref_softsign)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.sigmoid(x_1d)
expected_1d = np.vectorize(_ref_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.sigmoid(x_3d)
expected_3d = np.vectorize(_ref_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.sigmoid(x_zero)
expected_zero = np.vectorize(_ref_sigmoid)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.sigmoid(x_large_positive)
expected_large_positive = np.vectorize(_ref_sigmoid)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.sigmoid(x_large_negative)
expected_large_negative = np.vectorize(_ref_sigmoid)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_hard_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.hard_sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_hard_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.hard_sigmoid(x_1d)
expected_1d = np.vectorize(_ref_hard_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.hard_sigmoid(x_3d)
expected_3d = np.vectorize(_ref_hard_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values much larger than 1
x_positive_above_1 = np.random.uniform(
5, 10, (2, 5)
) # Adjusted this range
result_positive_above_1 = activations.hard_sigmoid(x_positive_above_1)
expected_positive_above_1 = np.ones((2, 5))
self.assertAllClose(
result_positive_above_1, expected_positive_above_1, rtol=1e-05
)
def test_sparse_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.sparse_sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_sparse_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.sparse_sigmoid(x_1d)
expected_1d = np.vectorize(_ref_sparse_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.sparse_sigmoid(x_3d)
expected_3d = np.vectorize(_ref_sparse_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.sparse_sigmoid(x_large_positive)
expected_large_positive = np.vectorize(_ref_sparse_sigmoid)(
x_large_positive
)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.sparse_sigmoid(x_large_negative)
expected_large_negative = np.vectorize(_ref_sparse_sigmoid)(
x_large_negative
)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_log_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.log_sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_log_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.log_sigmoid(x_1d)
expected_1d = np.vectorize(_ref_log_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.log_sigmoid(x_3d)
expected_3d = np.vectorize(_ref_log_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.log_sigmoid(x_large_positive)
expected_large_positive = np.vectorize(_ref_log_sigmoid)(
x_large_positive
)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.log_sigmoid(x_large_negative)
expected_large_negative = np.vectorize(_ref_log_sigmoid)(
x_large_negative
)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_hard_silu(self):
# Basic test for random values between -3 and 3
x = np.random.uniform(-3, 3, (2, 5)).astype("float32")
result = activations.hard_silu(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_hard_silu)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5).astype("float32")
result_1d = activations.hard_silu(x_1d)
expected_1d = np.vectorize(_ref_hard_silu)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3)).astype("float32")
result_3d = activations.hard_silu(x_3d)
expected_3d = np.vectorize(_ref_hard_silu)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values much larger than 3
x_positive_above_3 = np.random.uniform(5, 10, (2, 5)).astype("float32")
result_positive_above_3 = activations.hard_silu(x_positive_above_3)
expected_positive_above_3 = x_positive_above_3
self.assertAllClose(
result_positive_above_3, expected_positive_above_3, rtol=1e-05
)
# Test with strictly negative values much smaller than -3
x_negatives = np.random.uniform(-10, -5, (2, 5)).astype("float32")
result = activations.hard_silu(x_negatives)
expected_zeros = np.zeros_like(x_negatives)
self.assertAllClose(result, expected_zeros, rtol=1e-05)
def test_relu_negative_slope(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only negative_slope
result_negative_slope = activations.relu(x, negative_slope=0.5)
expected_negative_slope = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
self.assertAllClose(
result_negative_slope, expected_negative_slope, rtol=1e-05
)
def test_relu_max_value(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only max_value
result_max_value = activations.relu(x, max_value=5.0)
expected_max_value = np.array([0.0, 0.0, 0.0, 5.0, 5.0])
self.assertAllClose(result_max_value, expected_max_value, rtol=1e-05)
def test_relu_threshold(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only threshold
result_threshold = activations.relu(x, threshold=5.0)
expected_threshold = np.array([-0.0, -0.0, 0.0, 0.0, 10.0])
self.assertAllClose(result_threshold, expected_threshold, rtol=1e-05)
def test_relu_combined_threshold_and_max_value(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with threshold and max_value
result_combined = activations.relu(x, threshold=5.0, max_value=5.0)
expected_combined = np.array([0.0, 0.0, 0.0, 0.0, 5.0])
self.assertAllClose(result_combined, expected_combined, rtol=1e-05)
def test_relu_combined_all_parameters(self):
    # With all three knobs set: values below the threshold get the leaky
    # slope applied to (x - threshold); values above are clipped at
    # max_value.
    x = np.array([-10, -5, 0.0, 5, 10])
    result = activations.relu(
        x, negative_slope=0.5, max_value=5.0, threshold=5.0
    )
    self.assertAllClose(
        result, np.array([-7.5, -5.0, -2.5, 0.0, 5.0]), rtol=1e-05
    )
def test_relu_to_trigger_relu6(self):
x = np.array([-10, -5, 0.0, 5, 10, 12])
result_relu6 = activations.relu(x, max_value=6.0)
expected_relu6 = np.array([0.0, 0.0, 0.0, 5.0, 6.0, 6.0])
self.assertAllClose(result_relu6, expected_relu6, rtol=1e-05)
def test_relu_to_trigger_leaky(self):
x = np.array([-10, -5, 0.0, 5, 10])
result_leaky = activations.relu(x, negative_slope=0.5)
expected_leaky = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
self.assertAllClose(result_leaky, expected_leaky, rtol=1e-05)
def test_relu(self):
# Basic test for positive values
positive_values = np.random.uniform(0.1, 10, (2, 5))
result = activations.relu(positive_values[np.newaxis, :])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
# Basic test for negative values
negative_values = np.random.uniform(-10, -0.1, (2, 5))
result = activations.relu(negative_values[np.newaxis, :])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.relu(x_1d)
expected_1d = np.maximum(0, x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.relu(x_3d)
expected_3d = np.maximum(0, x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.relu(x_zero)
expected_zero = np.maximum(0, x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(1e4, 1e5, (2, 5))
result_large_positive = activations.relu(x_large_positive)
self.assertAllClose(result_large_positive, x_large_positive, rtol=1e-05)
# Test large negative values
x_large_negative = np.random.uniform(-1e5, -1e4, (2, 5))
result_large_negative = activations.relu(x_large_negative)
expected_large_negative = np.zeros((2, 5))
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_leaky_relu(self):
leaky_relu_vectorized = np.vectorize(_ref_leaky_relu)
# Test for negative_slope = 0.01
# Test positive values
positive_values = np.random.random((2, 5))
result = activations.leaky_relu(
positive_values[np.newaxis, :], negative_slope=0.01
)[0]
expected = leaky_relu_vectorized(positive_values, alpha=0.01)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.leaky_relu(
negative_values[np.newaxis, :], negative_slope=0.01
)[0]
expected = leaky_relu_vectorized(negative_values, alpha=0.01)
self.assertAllClose(result, expected, rtol=1e-05)
# Test for negative_slope = 0.3
# Test positive values
positive_values = np.random.random((2, 5))
result = activations.leaky_relu(
positive_values[np.newaxis, :], negative_slope=0.3
)[0]
expected = leaky_relu_vectorized(positive_values, alpha=0.3)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.leaky_relu(
negative_values[np.newaxis, :], negative_slope=0.3
)[0]
expected = leaky_relu_vectorized(negative_values, alpha=0.3)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu6(self):
relu6_vectorized = np.vectorize(_ref_relu6)
# Test positive values less than 6
positive_values = np.random.uniform(0, 5.9, (2, 5))
result = activations.relu6(positive_values[np.newaxis, :])[0]
expected = relu6_vectorized(positive_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test positive values greater than 6
positive_values_above_6 = np.random.uniform(6.1, 10, (2, 5))
result = activations.relu6(positive_values_above_6[np.newaxis, :])[0]
expected = relu6_vectorized(positive_values_above_6)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.relu6(negative_values[np.newaxis, :])[0]
expected = relu6_vectorized(negative_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_silu(self):
silu_vectorized = np.vectorize(_ref_silu)
# Test positive values
positive_values = np.random.uniform(0, 5.9, (2, 5))
result = activations.silu(positive_values[np.newaxis, :])[0]
expected = silu_vectorized(positive_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test values around zero (to ensure sigmoid behaves correctly)
around_zero_values = np.random.uniform(-1, 1, (2, 5))
result = activations.silu(around_zero_values[np.newaxis, :])[0]
expected = silu_vectorized(around_zero_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-5.9, 0, (2, 5))
result = activations.silu(negative_values[np.newaxis, :])[0]
expected = silu_vectorized(negative_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_gelu(self):
    def gelu_ref(x, approximate=False):
        # Exact form uses the Gaussian CDF; the tanh form is the common
        # polynomial approximation.
        if not approximate:
            from scipy.stats import norm

            return x * norm.cdf(x)
        inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))
        return 0.5 * x * (1.0 + np.tanh(inner))

    x = np.random.random((2, 5))
    self.assertAllClose(
        activations.gelu(x[np.newaxis, :])[0], gelu_ref(x), rtol=1e-05
    )
    x = np.random.random((2, 5))
    self.assertAllClose(
        activations.gelu(x[np.newaxis, :], approximate=True)[0],
        gelu_ref(x, True),
        rtol=1e-05,
    )
def test_celu(self):
def celu(x, alpha=1.0):
return np.maximum(x, 0.0) + alpha * np.expm1(
np.minimum(x, 0.0) / alpha
)
x = np.random.random((2, 5))
result = activations.celu(x[np.newaxis, :])[0]
expected = celu(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.random((2, 5))
result = activations.celu(x[np.newaxis, :], alpha=0.5)[0]
expected = celu(x, alpha=0.5)
self.assertAllClose(result, expected, rtol=1e-05)
def test_glu(self):
def glu(x, axis=-1):
x1, x2 = np.split(x, 2, axis)
return x1 * (1 / (1 + np.exp(-x2)))
x = np.random.random((2, 4))
result = activations.glu(x[np.newaxis, :])[0]
expected = glu(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.random((2, 4))
result = activations.glu(x[np.newaxis, :], axis=-2)[0]
expected = glu(x, axis=-2)
self.assertAllClose(result, expected, rtol=1e-05)
def test_tanh_shrink(self):
def tanh_shrink(x):
return x - np.tanh(x)
x = np.random.random((2, 5))
result = activations.tanh_shrink(x[np.newaxis, :])[0]
expected = tanh_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_tanh(self):
def hard_tanh(x):
return np.clip(x, -1.0, 1.0)
x = np.random.random((2, 5))
result = activations.hard_tanh(x[np.newaxis, :])[0]
expected = hard_tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_shrink(self):
def hard_shrink(x):
return np.where(np.abs(x) > 0.5, x, 0.0)
x = np.random.random((2, 5))
result = activations.hard_shrink(x[np.newaxis, :])[0]
expected = hard_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_threshold(self):
def threshold(x, threshold_value, value):
return np.where(
x > threshold_value, x, np.array(value, dtype=x.dtype)
)
x = np.random.random((2, 5))
result = activations.threshold(x[np.newaxis, :], 0, 0)[0]
expected = threshold(x, 0, 0)
self.assertAllClose(result, expected, rtol=1e-05)
def test_squareplus(self):
def squareplus(x, b=4):
y = x + np.sqrt(x**2 + b)
return y / 2
x = np.random.random((2, 5))
result = activations.squareplus(x[np.newaxis, :])[0]
expected = squareplus(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_soft_shrink(self):
def soft_shrink(x, threshold=0.5):
return np.where(
x > threshold,
x - threshold,
np.where(x < -threshold, x + threshold, 0.0),
)
x = np.random.random((2, 5))
result = activations.soft_shrink(x[np.newaxis, :])[0]
expected = soft_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sparse_plus(self):
def sparse_plus(x):
return np.where(
x <= -1,
np.zeros_like(x),
np.where(x < 1, (1 / 4) * (x + 1) ** 2, x),
)
x = np.random.random((2, 5))
result = activations.sparse_plus(x[np.newaxis, :])[0]
expected = sparse_plus(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = np.random.random((2, 5))
result = activations.elu(x[np.newaxis, :])[0]
self.assertAllClose(result, x, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = activations.elu(negative_values[np.newaxis, :])[0]
true_result = np.exp(negative_values) - 1
self.assertAllClose(result, true_result)
def test_tanh(self):
# Basic test for the tanh activation function
x = np.random.random((2, 5))
result = activations.tanh(x[np.newaxis, :])[0]
expected = np.tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Basic test for the tanh activation function
x = np.random.uniform(-10, 10, (2, 5))
result = activations.tanh(x[np.newaxis, :])[0]
expected = np.tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.tanh(x_1d)
expected_1d = np.tanh(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.tanh(x_3d)
expected_3d = np.tanh(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values
x_positive = np.random.uniform(0, 10, (2, 5))
result_positive = activations.tanh(x_positive)
expected_positive = np.tanh(x_positive)
self.assertAllClose(result_positive, expected_positive, rtol=1e-05)
# Test with strictly negative values
x_negative = np.random.uniform(-10, 0, (2, 5))
result_negative = activations.tanh(x_negative)
expected_negative = np.tanh(x_negative)
self.assertAllClose(result_negative, expected_negative, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.tanh(x_zero)
expected_zero = np.tanh(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large values to check stability
x_large = np.random.uniform(1e4, 1e5, (2, 5))
result_large = activations.tanh(x_large)
expected_large = np.tanh(x_large)
self.assertAllClose(result_large, expected_large, rtol=1e-05)
def test_exponential(self):
# Basic test for the exponential activation function
x = np.random.random((2, 5))
result = activations.exponential(x[np.newaxis, :])[0]
expected = np.exp(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.uniform(-10, 10, (2, 5))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/activations/__init__.py | keras/src/activations/__init__.py | import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
# Registry of every built-in activation function exported by
# `keras.activations`.
ALL_OBJECTS = {
    relu,
    leaky_relu,
    relu6,
    softmax,
    celu,
    elu,
    selu,
    softplus,
    softsign,
    squareplus,
    soft_shrink,
    sparse_plus,
    silu,
    gelu,
    glu,
    tanh,
    tanh_shrink,
    threshold,
    sigmoid,
    sparse_sigmoid,
    exponential,
    hard_sigmoid,
    hard_silu,
    hard_tanh,
    hard_shrink,
    linear,
    mish,
    log_softmax,
    log_sigmoid,
    sparsemax,
}

# Name -> function lookup used by `serialize`, `deserialize` and `get`.
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
    """Return a serializable representation of `activation`.

    Built-in activations serialize to their plain string name; callable
    objects and externally registered functions serialize to a config
    dict (or their registered name).

    Args:
        activation: An activation function or callable activation object.

    Returns:
        A string name for built-in activation functions, otherwise a
        serialization config dict.

    Raises:
        ValueError: If the activation cannot be serialized by name.
    """
    fn_config = serialization_lib.serialize_keras_object(activation)
    if "config" not in fn_config:
        raise ValueError(
            f"Unknown activation function '{activation}' cannot be "
            "serialized due to invalid function name. Make sure to use "
            "an activation name that matches the references defined in "
            "activations.py or use "
            "`@keras.saving.register_keras_serializable()`"
            "to register any custom activations. "
            f"config={fn_config}"
        )
    if not isinstance(activation, types.FunctionType):
        # Case for additional custom activations represented by objects
        return fn_config
    if (
        isinstance(fn_config["config"], str)
        and fn_config["config"] not in globals()
    ):
        # Case for custom activation functions from external activations
        # modules: `globals()` only contains the names defined in THIS
        # module, so anything else must be resolved via the registry.
        fn_config["config"] = object_registration.get_registered_name(
            activation
        )
        return fn_config
    # Case for keras.activations builtins (simply return name)
    return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras activation function via its config."""
    activation = serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )
    return activation
@keras_export("keras.activations.get")
def get(identifier):
    """Retrieve a Keras activation function via an identifier.

    Accepts `None` (maps to `linear`), a built-in activation name, a
    serialization config dict, or a callable (returned unchanged).
    """
    if identifier is None:
        return linear
    if isinstance(identifier, str):
        candidate = ALL_OBJECTS_DICT.get(identifier, None)
    elif isinstance(identifier, dict):
        candidate = serialization_lib.deserialize_keras_object(identifier)
    else:
        candidate = identifier
    if callable(candidate):
        return candidate
    raise ValueError(
        f"Could not interpret activation function identifier: {identifier}"
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/activations/activations.py | keras/src/activations/activations.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.activations.relu")
def relu(x, negative_slope=0.0, max_value=None, threshold=0.0):
    """Applies the rectified linear unit activation function.

    With default values, this returns the standard ReLU activation:
    `max(x, 0)`, the element-wise maximum of 0 and the input tensor.

    Modifying default parameters allows you to use non-zero thresholds,
    change the max value of the activation,
    and to use a non-zero multiple of the input for values below the threshold.

    Examples:

    >>> x = [-10, -5, 0.0, 5, 10]
    >>> keras.activations.relu(x)
    [ 0.,  0.,  0.,  5., 10.]
    >>> keras.activations.relu(x, negative_slope=0.5)
    [-5. , -2.5,  0. ,  5. , 10. ]
    >>> keras.activations.relu(x, max_value=5.)
    [0., 0., 0., 5., 5.]
    >>> keras.activations.relu(x, threshold=5.)
    [-0., -0.,  0.,  0., 10.]

    Args:
        x: Input tensor.
        negative_slope: A `float` that controls the slope
            for values lower than the threshold.
        max_value: A `float` that sets the saturation threshold (the largest
            value the function will return).
        threshold: A `float` giving the threshold value of the activation
            function below which values will be damped or set to zero.

    Returns:
        A tensor with the same shape and dtype as input `x`.
    """
    if backend.any_symbolic_tensors((x,)):
        # Symbolic (functional-API) inputs go through the Operation so
        # shape/dtype inference works.
        return ReLU(
            negative_slope=negative_slope,
            max_value=max_value,
            threshold=threshold,
        )(x)
    # Eager tensors take the direct computation path.
    return ReLU.static_call(
        x,
        negative_slope=negative_slope,
        max_value=max_value,
        threshold=threshold,
    )
class ReLU(ops.Operation):
    """Symbolic `Operation` backing `keras.activations.relu`.

    Stores the `negative_slope`, `max_value` and `threshold` configuration
    and defers the actual computation to `static_call`, which is shared
    with the eager path in `relu()`.
    """

    def __init__(
        self, negative_slope=0.0, max_value=None, threshold=0.0, name=None
    ):
        super().__init__(name=name)
        self.negative_slope = negative_slope
        self.max_value = max_value
        self.threshold = threshold

    def call(self, x):
        return self.static_call(
            x,
            negative_slope=self.negative_slope,
            max_value=self.max_value,
            threshold=self.threshold,
        )

    def compute_output_spec(self, x):
        # Elementwise op: output shape and dtype match the input.
        return backend.KerasTensor(x.shape, x.dtype)

    @staticmethod
    def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
        """Compute the (possibly leaky, clipped, thresholded) ReLU of `x`."""
        x = backend.convert_to_tensor(x)
        if negative_slope != 0.0:
            if max_value is None and threshold == 0:
                # Fast path: a plain leaky ReLU needs no extra bookkeeping.
                return backend.nn.leaky_relu(x, negative_slope=negative_slope)

            if threshold != 0:
                negative_part = backend.nn.relu(-x + threshold)
            else:
                negative_part = backend.nn.relu(-x)
        else:
            # Sentinel; never used because the final subtraction below is
            # also guarded by `negative_slope != 0.0`.
            negative_part = 1

        clip_max = max_value is not None
        if threshold != 0:
            # computes x for x > threshold else 0
            threshold = ops.cast(threshold, dtype=x.dtype)
            x = x * backend.cast(
                backend.numpy.greater(x, threshold), dtype=x.dtype
            )
        elif max_value == 6:
            # if no threshold, then can use nn.relu6 native op for performance
            x = backend.nn.relu6(x)
            clip_max = False
        else:
            x = backend.nn.relu(x)

        if clip_max:
            min_value = ops.cast(0.0, dtype=x.dtype)
            max_value = ops.cast(max_value, dtype=x.dtype)
            x = backend.numpy.clip(x, min_value, max_value)

        if negative_slope != 0.0:
            # Re-introduce the scaled negative part below the threshold.
            x -= negative_slope * negative_part
        return x
@keras_export("keras.activations.leaky_relu")
def leaky_relu(x, negative_slope=0.2):
    """Leaky relu activation function.

    Like ReLU, except values below zero are scaled by `negative_slope`
    rather than zeroed out.

    Args:
        x: Input tensor.
        negative_slope: A `float`, the slope applied below the threshold.
    """
    result = ops.leaky_relu(x, negative_slope=negative_slope)
    return result
@keras_export("keras.activations.relu6")
def relu6(x):
    """Relu6 activation function.

    Computes `min(max(x, 0), 6)`: a standard ReLU truncated at 6.

    Args:
        x: Input tensor.
    """
    result = ops.relu6(x)
    return result
@keras_export("keras.activations.softmax")
def softmax(x, axis=-1):
    """Softmax converts a vector of values to a probability distribution.

    Each output element lies in `[0, 1]` and each vector sums to 1;
    input vectors are handled independently along `axis`. The softmax of
    a vector `x` is `exp(x) / sum(exp(x))`, so the inputs are the
    log-odds of the resulting probabilities. Commonly used as the final
    activation of a classification network.

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax is applied.
    """
    result = ops.softmax(x, axis=axis)
    # Stash the raw logits on the output so crossentropy losses can
    # recover them for a numerically stable computation.
    try:
        result._keras_logits = x
    except AttributeError:
        # The backend returned a C-type that rejects new attributes.
        pass
    return result
@keras_export("keras.activations.elu")
def elu(x, alpha=1.0):
    """Exponential Linear Unit.

    Computes `x` for positive inputs and `alpha * (exp(x) - 1)` for
    negative ones. The negative outputs push mean activations toward
    zero, which brings gradients closer to the natural gradient and
    speeds up learning; the function saturates for large negative
    arguments, damping the information propagated onward.

    Args:
        x: Input tensor.
        alpha: A scalar, slope of positive section. Defaults to `1.0`.

    Reference:

    - [Clevert et al., 2016](https://arxiv.org/abs/1511.07289)
    """
    result = ops.elu(x, alpha=alpha)
    return result
@keras_export("keras.activations.selu")
def selu(x):
    """Scaled Exponential Linear Unit (SELU).

    Computes `scale * x` for `x > 0` and
    `scale * alpha * (exp(x) - 1)` for `x < 0`, with the fixed constants
    `alpha=1.67326324` and `scale=1.05070098`. The constants are chosen
    so that mean and variance are preserved between consecutive layers
    when weights are initialized with `keras.initializers.LecunNormal`
    and the number of input units is large enough (see the paper).

    Args:
        x: Input tensor.

    Notes:

    - Use together with the `keras.initializers.LecunNormal` initializer.
    - Use together with the dropout variant `keras.layers.AlphaDropout`
      (rather than regular dropout).

    Reference:

    - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
    """
    result = ops.selu(x)
    return result
@keras_export("keras.activations.softplus")
def softplus(x):
    """Softplus activation function.

    Computes `log(exp(x) + 1)`, a smooth approximation of ReLU.

    Args:
        x: Input tensor.
    """
    result = ops.softplus(x)
    return result
@keras_export("keras.activations.softsign")
def softsign(x):
    """Softsign activation function.

    Computes `x / (abs(x) + 1)`.

    Args:
        x: Input tensor.
    """
    result = ops.softsign(x)
    return result
@keras_export("keras.activations.soft_shrink")
def soft_shrink(x, threshold=0.5):
    """Soft Shrink activation function.

    Shrinks values toward zero by `threshold`:
    `x - threshold` when `x > threshold`, `x + threshold` when
    `x < -threshold`, and `0` otherwise.

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.
    """
    result = ops.soft_shrink(x, threshold=threshold)
    return result
@keras_export("keras.activations.sparse_plus")
def sparse_plus(x):
    """SparsePlus activation function.

    Piecewise definition: `0` for `x <= -1`, `(1/4) * (x + 1)^2` for
    `-1 < x < 1`, and `x` for `x >= 1`.

    Args:
        x: Input tensor.
    """
    result = ops.sparse_plus(x)
    return result
@keras_export(["keras.activations.silu", "keras.activations.swish"])
def silu(x):
    """Swish (or Silu) activation function.

    Computes `x * sigmoid(x)`: a smooth, non-monotonic function,
    unbounded above and bounded below.

    Args:
        x: Input tensor.

    Reference:

    - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
    """
    result = ops.silu(x)
    return result
@keras_export("keras.activations.squareplus")
def squareplus(x, b=4):
    """Squareplus activation function.

    Computes `f(x) = (x + sqrt(x^2 + b)) / 2`, where `b` controls the
    smoothness near zero.

    Args:
        x: Input tensor.
        b: Smoothness parameter. Defaults to 4.

    Reference:

    - [Ramachandran et al., 2021](https://arxiv.org/abs/2112.11687)
    """
    result = ops.squareplus(x, b=b)
    return result
@keras_export("keras.activations.gelu")
def gelu(x, approximate=False):
    """Gaussian error linear unit (GELU) activation function.

    Computes `x * P(X <= x)` with `X ~ N(0, 1)`, i.e.
    `0.5 * x * (1 + erf(x / sqrt(2)))`. Unlike ReLU, which gates inputs
    by their sign, GELU weights inputs by their value.

    Args:
        x: Input tensor.
        approximate: A `bool`, whether to use the tanh approximation.

    Reference:

    - [Hendrycks et al., 2016](https://arxiv.org/abs/1606.08415)
    """
    result = ops.gelu(x, approximate=approximate)
    return result
@keras_export("keras.activations.celu")
def celu(x, alpha=1.0):
    """Continuously Differentiable Exponential Linear Unit.

    Computes `alpha * (exp(x / alpha) - 1)` for `x < 0` and `x` for
    `x >= 0`, where `alpha` scales the activation's shape.

    Args:
        x: Input tensor.
        alpha: The α value for the CeLU formulation. Defaults to `1.0`.

    Reference:

    - [Barron, J. T., 2017](https://arxiv.org/abs/1704.07483)
    """
    result = ops.celu(x, alpha=alpha)
    return result
@keras_export("keras.activations.glu")
def glu(x, axis=-1):
    """Gated Linear Unit (GLU) activation function.

    Splits the input into two equal halves `a` and `b` along `axis` and
    computes `a * sigmoid(b)`.

    Args:
        x: Input tensor.
        axis: The axis along which to split the input tensor.
            Defaults to `-1`.

    Reference:

    - [Dauphin et al., 2017](https://arxiv.org/abs/1612.08083)
    """
    result = ops.glu(x, axis=axis)
    return result
@keras_export("keras.activations.tanh")
def tanh(x):
    """Hyperbolic tangent activation function.

    Computes `tanh(x) = sinh(x) / cosh(x)`, i.e.
    `(exp(x) - exp(-x)) / (exp(x) + exp(-x))`.

    Args:
        x: Input tensor.
    """
    result = ops.tanh(x)
    return result
@keras_export("keras.activations.tanh_shrink")
def tanh_shrink(x):
    """Tanh shrink activation function.

    Computes `f(x) = x - tanh(x)`.

    Args:
        x: Input tensor.
    """
    result = ops.tanh_shrink(x)
    return result
@keras_export("keras.activations.hard_tanh")
def hard_tanh(x):
    """HardTanh activation function.

    Clamps the input to `[-1, 1]`: `-1` for `x < -1`, `x` for
    `-1 <= x <= 1`, and `1` for `x > 1`.

    Args:
        x: Input tensor.
    """
    result = ops.hard_tanh(x)
    return result
@keras_export("keras.activations.hard_shrink")
def hard_shrink(x, threshold=0.5):
    """Hard Shrink activation function.

    Keeps `x` where `|x| > threshold` and zeroes everything else.

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.
    """
    result = ops.hard_shrink(x, threshold=threshold)
    return result
@keras_export("keras.activations.threshold")
def threshold(x, threshold, default_value):
    """Threshold activation function.

    Passes `x` through unchanged where `x > threshold` and substitutes
    `default_value` everywhere else.

    Args:
        x: Input tensor.
        threshold: The value that decides when to retain or replace x.
        default_value: Value to assign when `x <= threshold`.
    """
    result = ops.threshold(x, threshold, default_value)
    return result
@keras_export("keras.activations.sigmoid")
def sigmoid(x):
    """Sigmoid activation function.

    Computes `1 / (1 + exp(-x))`, squashing inputs into `(0, 1)`:
    values below -5 map close to 0 and values above 5 map close to 1.
    Equivalent to a 2-element softmax where the second logit is zero.

    Args:
        x: Input tensor.
    """
    result = ops.sigmoid(x)
    # Keep the raw logits around so crossentropy losses can use them
    # for a numerically stable computation.
    try:
        result._keras_logits = x
    except AttributeError:
        # The backend returned a C-type that rejects new attributes.
        pass
    return result
@keras_export("keras.activations.exponential")
def exponential(x):
    """Exponential activation function: `exp(x)`.

    Args:
        x: Input tensor.
    """
    result = ops.exp(x)
    return result
@keras_export("keras.activations.hard_sigmoid")
def hard_sigmoid(x):
    """Hard sigmoid activation function.

    A faster, piecewise-linear approximation of the sigmoid:
    `0` for `x <= -3`, `1` for `x >= 3`, and `(x / 6) + 0.5` in between.

    Args:
        x: Input tensor.

    Reference:

    - [Wikipedia "Hard sigmoid"](https://en.wikipedia.org/wiki/Hard_sigmoid)
    """
    result = ops.hard_sigmoid(x)
    return result
@keras_export("keras.activations.log_sigmoid")
def log_sigmoid(x):
    """Logarithm of the sigmoid activation function.

    Computes `f(x) = log(1 / (1 + exp(-x)))`.

    Args:
        x: Input tensor.
    """
    result = ops.log_sigmoid(x)
    return result
@keras_export("keras.activations.sparse_sigmoid")
def sparse_sigmoid(x):
    """Sparse sigmoid activation function.

    Piecewise definition: `0` for `x <= -1`, `0.5 * (x + 1)` for
    `-1 < x < 1`, and `1` for `x >= 1`.

    Args:
        x: Input tensor.

    Reference:

    - [M. Blondel, A. F. T. Martins, V. Niculae, 2019](https://arxiv.org/pdf/1901.02324)
    """
    result = ops.sparse_sigmoid(x)
    return result
@keras_export(["keras.activations.hard_silu", "keras.activations.hard_swish"])
def hard_silu(x):
    """Hard SiLU activation function, also known as Hard Swish.

    A faster, piecewise-linear approximation of SiLU:
    `0` for `x < -3`, `x` for `x > 3`, and `x * (x + 3) / 6` in between.

    Args:
        x: Input tensor.

    Reference:

    - [A Howard, 2019](https://arxiv.org/abs/1905.02244)
    """
    tensor = backend.convert_to_tensor(x)
    return ops.hard_silu(tensor)
@keras_export("keras.activations.linear")
def linear(x):
    """Linear activation function (pass-through).

    The identity function: returns its input unmodified.

    Args:
        x: Input tensor.
    """
    # No-op on purpose: "linear" means no activation is applied.
    return x
class Mish(ops.Operation):
    """Operation computing `x * tanh(softplus(x))`."""

    def call(self, x):
        return Mish.static_call(x)

    def compute_output_spec(self, x):
        # Mish is element-wise: output matches the input spec.
        return backend.KerasTensor(x.shape, x.dtype)

    @staticmethod
    def static_call(x):
        softplus_x = backend.nn.softplus(x)
        return x * backend.nn.tanh(softplus_x)
@keras_export("keras.activations.mish")
def mish(x):
    """Mish activation function.

    Computes `mish(x) = x * tanh(softplus(x))`, where
    `softplus(x) = log(exp(x) + 1)`.

    Args:
        x: Input tensor.

    Reference:

    - [Misra, 2019](https://arxiv.org/abs/1908.08681)
    """
    tensor = backend.convert_to_tensor(x)
    return Mish.static_call(tensor)
@keras_export("keras.activations.log_softmax")
def log_softmax(x, axis=-1):
    """Log-Softmax activation function.

    Computes `log(softmax(x))` along `axis`; each input vector is
    handled independently.

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax is applied.
    """
    result = ops.log_softmax(x, axis=axis)
    return result
@keras_export(["keras.activations.sparsemax"])
def sparsemax(x, axis=-1):
    """Sparsemax activation function.

    For each batch `i` and class `j`:
    `sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0)`.

    Args:
        x: Input tensor.
        axis: `int`, axis along which the sparsemax operation is applied.

    Returns:
        A tensor, output of sparsemax transformation. Has the same type
        and shape as `x`.

    Reference:

    - [Martins et.al., 2016](https://arxiv.org/abs/1602.02068)
    """
    tensor = backend.convert_to_tensor(x)
    return ops.sparsemax(tensor, axis)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/summary_utils.py | keras/src/utils/summary_utils.py | import functools
import math
import re
import shutil
import rich
import rich.console
import rich.markup
# See https://github.com/keras-team/keras/issues/448
# for below imports
import rich.table
from keras.src import backend
from keras.src import tree
from keras.src.utils import dtype_utils
from keras.src.utils import io_utils
def count_params(weights):
    """Return the total number of scalar elements across `weights`."""
    return int(sum(math.prod(w.shape) for w in weights))
@functools.lru_cache(512)
def _compute_memory_size(shape, dtype):
    # Cached: the same (shape, dtype) pairs recur across a model's
    # weights, so memoizing avoids repeated dtype lookups.
    canonical_dtype = backend.standardize_dtype(dtype)
    per_param_size = dtype_utils.dtype_size(canonical_dtype)
    return math.prod(shape) * per_param_size
def weight_memory_size(weights):
    """Compute the memory footprint for weights based on their dtypes.

    Args:
        weights: An iterable containing the weights to measure.

    Returns:
        The total memory size (in Bytes) of the weights.
    """
    # Deduplicate by identity so shared weights are only counted once.
    unique_weights = {id(w): w for w in weights}.values()
    total_bits = sum(
        _compute_memory_size(w.shape, w.dtype) for w in unique_weights
    )
    # _compute_memory_size reports bits (per dtype_utils.dtype_size) —
    # divide by 8 to express the total in bytes.
    return total_bits / 8
def readable_memory_size(weight_memory_size):
    """Convert a weight memory size (Bytes) to a human-readable string."""
    units = ["B", "KB", "MB", "GB", "TB", "PB"]
    scale = 1024
    size = weight_memory_size
    for unit in units:
        if size / scale < 1:
            return "{:.2f} {}".format(size, unit)
        size /= scale
    # Larger than the table covers: report in the biggest unit we have.
    return "{:.2f} {}".format(size, units[-1])
def highlight_number(x):
    """Theme a number for the summary using rich markup.

    `None` (e.g. an unknown dimension in a shape) gets its own color so
    it stands out from concrete numbers.
    """
    color = 45 if x is None else 34
    return f"[color({color})]{x}[/]"
def highlight_symbol(x):
    """Theme a Keras symbol for the summary using rich markup."""
    return "[color(33)]" + f"{x}" + "[/]"
def bold_text(x, color=None):
    """Bold `x` with rich markup, optionally wrapped in a color tag."""
    if not color:
        return f"[bold]{x}[/]"
    return f"[bold][color({color})]{x}[/][/]"
def format_layer_shape(layer):
    """Return a rich-markup string describing `layer`'s output shape(s).

    Returns "?" when the shape cannot be determined (layer never called
    and never built, or `compute_output_shape` is not implemented).
    """
    if not layer._inbound_nodes and not layer._build_shapes_dict:
        return "?"

    def format_shape(shape):
        # Render e.g. (None, 32) with each dim color-highlighted.
        highlighted = [highlight_number(x) for x in shape]
        return f"({', '.join(highlighted)})"

    # There are 2 approaches to get output shapes:
    # 1. Using `layer._inbound_nodes`, which is possible if the model is a
    # Sequential or Functional.
    # 2. Using `layer._build_shapes_dict`, which is possible if users manually
    # build the layer.
    if len(layer._inbound_nodes) > 0:
        # NOTE(review): this loop overwrites `output_shapes` on every
        # iteration, so only the last node's shapes are reported —
        # presumably intended; confirm for multi-node layers.
        for i in range(len(layer._inbound_nodes)):
            outputs = layer._inbound_nodes[i].output_tensors
            output_shapes = tree.map_structure(
                lambda x: format_shape(x.shape), outputs
            )
    else:
        try:
            if hasattr(layer, "output_shape"):
                output_shapes = format_shape(layer.output_shape)
            else:
                outputs = layer.compute_output_shape(**layer._build_shapes_dict)
                output_shapes = tree.map_shape_structure(
                    lambda x: format_shape(x), outputs
                )
        except NotImplementedError:
            return "?"
    if len(output_shapes) == 1:
        return output_shapes[0]
    # Multiple outputs: stringify the structure and drop the quotes that
    # str() puts around each formatted shape.
    out = str(output_shapes)
    out = out.replace("'", "")
    return out
def print_summary(
    model,
    line_length=None,
    positions=None,
    print_fn=None,
    expand_nested=False,
    show_trainable=False,
    layer_range=None,
):
    """Prints a summary of a model.

    Args:
        model: Keras model instance.
        line_length: Total length of printed lines
            (e.g. set this to adapt the display to different
            terminal window sizes).
        positions: Relative or absolute positions of log elements in each
            line. If not provided, defaults to `[0.3, 0.6, 0.70, 1.]`.
        print_fn: Print function to use.
            It will be called on each line of the summary.
            You can set it to a custom function
            in order to capture the string summary.
            It defaults to `print` (prints to stdout).
        expand_nested: Whether to expand the nested models.
            If not provided, defaults to `False`.
        show_trainable: Whether to show if a layer is trainable.
            If not provided, defaults to `False`.
        layer_range: List or tuple containing two strings,
            the starting layer name and ending layer name (both inclusive),
            indicating the range of layers to be printed in the summary. The
            strings could also be regexes instead of an exact name. In this
            case, the starting layer will be the first layer that matches
            `layer_range[0]` and the ending layer will be the last element
            that matches `layer_range[1]`. By default (`None`) all
            layers in the model are included in the summary.
    """
    # Imported here to avoid a circular import at module load time.
    from keras.src.models import Functional
    from keras.src.models import Sequential

    if not print_fn and not io_utils.is_interactive_logging_enabled():
        print_fn = io_utils.print_msg

    # Decide whether the model can be rendered in the narrow
    # "sequential" layout (no "Connected to" column).
    if isinstance(model, Sequential):
        sequential_like = True
        layers = model.layers
    elif not isinstance(model, Functional):
        # We treat subclassed models as a simple sequence of layers, for
        # logging purposes.
        sequential_like = True
        layers = model.layers
    else:
        layers = model._operations
        sequential_like = True
        nodes_by_depth = model._nodes_by_depth.values()
        nodes = []
        for v in nodes_by_depth:
            if (len(v) > 1) or (
                len(v) == 1 and len(tree.flatten(v[0].input_tensors)) > 1
            ):
                # if the model has multiple nodes
                # or if the nodes have multiple inbound_layers
                # the model is no longer sequential
                sequential_like = False
                break
            nodes += v
        if sequential_like:
            # search for shared layers
            for layer in model.layers:
                flag = False
                for node in layer._inbound_nodes:
                    if node in nodes:
                        if flag:
                            sequential_like = False
                            break
                        else:
                            flag = True
                if not sequential_like:
                    break

    if sequential_like:
        default_line_length = 88
        positions = positions or [0.45, 0.80, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #"]
        alignment = ["left", "left", "right"]
    else:
        default_line_length = 108
        positions = positions or [0.3, 0.56, 0.74, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #", "Connected to"]
        alignment = ["left", "left", "right", "left"]
        relevant_nodes = []
        for v in model._nodes_by_depth.values():
            relevant_nodes += v

    if show_trainable:
        default_line_length += 12
        # Shrink existing columns by 10% to make room for the new one.
        positions = [p * 0.90 for p in positions] + [1.0]
        header.append("Trainable")
        alignment.append("center")

    # Compute columns widths
    default_line_length = min(
        default_line_length, shutil.get_terminal_size().columns - 4
    )
    line_length = line_length or default_line_length
    column_widths = []
    current = 0
    for pos in positions:
        width = int(pos * line_length) - current
        if width < 4:
            raise ValueError("Insufficient console width to print summary.")
        column_widths.append(width)
        current += width

    # Render summary as a rich table.
    columns = []
    # Right align parameter counts.
    for i, name in enumerate(header):
        column = rich.table.Column(
            name,
            justify=alignment[i],
            width=column_widths[i],
        )
        columns.append(column)
    table = rich.table.Table(*columns, width=line_length, show_lines=True)

    # Build the "Connected to" cell: "layer[node_idx][tensor_idx]" per
    # inbound tensor, or "-" when the layer has no relevant inputs.
    def get_connections(layer):
        connections = ""
        for node in layer._inbound_nodes:
            if relevant_nodes and node not in relevant_nodes:
                # node is not part of the current network
                continue
            for kt in node.input_tensors:
                keras_history = kt._keras_history
                inbound_layer = keras_history.operation
                node_index = highlight_number(keras_history.node_index)
                tensor_index = highlight_number(keras_history.tensor_index)
                if connections:
                    connections += ", "
                connections += (
                    f"{inbound_layer.name}[{node_index}][{tensor_index}]"
                )
        if not connections:
            connections = "-"
        return connections

    # Build one table row (list of cell strings) for a layer.
    def get_layer_fields(layer, prefix=""):
        output_shape = format_layer_shape(layer)
        name = f"{prefix}{layer.name}"
        cls_name = layer.__class__.__name__
        name = rich.markup.escape(name)
        name += f" ({highlight_symbol(rich.markup.escape(cls_name))})"
        if not hasattr(layer, "built"):
            params = highlight_number(0)
        elif not layer.built:
            params = f"{highlight_number(0)} (unbuilt)"
        else:
            params = highlight_number(f"{layer.count_params():,}")
        fields = [name, output_shape, params]
        if not sequential_like:
            fields.append(get_connections(layer))
        if show_trainable:
            if hasattr(layer, "weights") and len(layer.weights) > 0:
                fields.append(
                    bold_text("Y", color=34)
                    if layer.trainable
                    else bold_text("N", color=9)
                )
            else:
                fields.append(bold_text("-"))
        return fields

    # Collect rows for a layer, recursing into sub-layers when
    # `expand_nested` is set (indenting each nesting level).
    def print_layer(layer, nested_level=0):
        if nested_level:
            prefix = " " * nested_level + "└ "
        else:
            prefix = ""
        fields = get_layer_fields(layer, prefix=prefix)
        rows = [fields]
        if expand_nested and hasattr(layer, "layers") and layer.layers:
            nested_layers = layer.layers
            nested_level += 1
            for i in range(len(nested_layers)):
                rows.extend(
                    print_layer(nested_layers[i], nested_level=nested_level)
                )
        return rows

    # Render all layers to the rich table.
    layer_range = get_layer_index_bound_by_layer_name(layers, layer_range)
    for layer in layers[layer_range[0] : layer_range[1]]:
        for row in print_layer(layer):
            table.add_row(*row)

    # After the table, append information about parameter count and size.
    if hasattr(model, "_collected_trainable_weights"):
        trainable_count = count_params(model._collected_trainable_weights)
        trainable_memory_size = weight_memory_size(
            model._collected_trainable_weights
        )
    else:
        trainable_count = count_params(model.trainable_weights)
        trainable_memory_size = weight_memory_size(model.trainable_weights)

    non_trainable_count = count_params(model.non_trainable_weights)
    non_trainable_memory_size = weight_memory_size(model.non_trainable_weights)

    if model.compiled and model.optimizer and model.optimizer.built:
        optimizer_weight_count = count_params(model.optimizer.variables)
        optimizer_memory_size = weight_memory_size(model.optimizer.variables)
        optimizer_built = True
    else:
        optimizer_weight_count = 0
        optimizer_memory_size = 0
        optimizer_built = False

    total_count = trainable_count + non_trainable_count + optimizer_weight_count
    total_memory_size = (
        trainable_memory_size
        + non_trainable_memory_size
        + optimizer_memory_size
    )

    # Create a rich console for printing. Capture for non-interactive logging.
    if print_fn:
        console = rich.console.Console(
            highlight=False, force_terminal=False, color_system=None
        )
        console.begin_capture()
    else:
        console = rich.console.Console(highlight=False)

    # Print the to the console.
    console.print(bold_text(f'Model: "{rich.markup.escape(model.name)}"'))
    console.print(table)
    console.print(
        bold_text(" Total params: ")
        + highlight_number(f"{total_count:,}")
        + f" ({readable_memory_size(total_memory_size)})"
    )
    console.print(
        bold_text(" Trainable params: ")
        + highlight_number(f"{trainable_count:,}")
        + f" ({readable_memory_size(trainable_memory_size)})"
    )
    console.print(
        bold_text(" Non-trainable params: ")
        + highlight_number(f"{non_trainable_count:,}")
        + f" ({readable_memory_size(non_trainable_memory_size)})"
    )
    if optimizer_built:
        console.print(
            bold_text(" Optimizer params: ")
            + highlight_number(f"{optimizer_weight_count:,}")
            + f" ({readable_memory_size(optimizer_memory_size)})"
        )

    # Output captured summary for non-interactive logging.
    if print_fn:
        if print_fn is io_utils.print_msg:
            print_fn(console.end_capture(), line_break=False)
        else:
            print_fn(console.end_capture())
def get_layer_index_bound_by_layer_name(layers, layer_range=None):
    """Resolve a (start, end) layer-index slice from a pair of layer names.

    The resulting indexes can be used to slice the model into sub models
    for display.

    Args:
        layers: Sequence of layers, each with a `name` attribute.
        layer_range: `None`, or a list/tuple of 2 strings (regexes are
            allowed) naming the first and last layer, both inclusive.
            All layers are included when `None` is provided.

    Returns:
        `[first_layer_index, last_layer_index + 1]`, suitable for slicing.
    """
    if layer_range is None:
        return [0, len(layers)]
    if len(layer_range) != 2:
        raise ValueError(
            "layer_range must be a list or tuple of length 2. Received: "
            f"layer_range = {layer_range} of length {len(layer_range)}"
        )
    if not isinstance(layer_range[0], str) or not isinstance(
        layer_range[1], str
    ):
        raise ValueError(
            "layer_range should contain string type only. "
            f"Received: {layer_range}"
        )
    lower_index = [
        idx
        for idx, layer in enumerate(layers)
        if re.match(layer_range[0], layer.name)
    ]
    upper_index = [
        idx
        for idx, layer in enumerate(layers)
        if re.match(layer_range[1], layer.name)
    ]
    if not lower_index or not upper_index:
        raise ValueError(
            "Passed layer_names do not match the layer names in the model. "
            f"Received: {layer_range}"
        )
    if min(lower_index) > max(upper_index):
        # The two names matched in reverse order; swap the bounds.
        return [min(upper_index), max(lower_index) + 1]
    return [min(lower_index), max(upper_index) + 1]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/torch_utils.py | keras/src/utils/torch_utils.py | import base64
import io
from packaging.version import parse
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers import Layer
from keras.src.ops import convert_to_numpy
from keras.src.ops import convert_to_tensor
from keras.src.saving.serialization_lib import in_safe_mode
@keras_export("keras.layers.TorchModuleWrapper")
class TorchModuleWrapper(Layer):
    """Torch module wrapper layer.

    `TorchModuleWrapper` is a wrapper class that can turn any
    `torch.nn.Module` into a Keras layer, in particular by making its
    parameters trackable by Keras.

    `TorchModuleWrapper` is only compatible with the PyTorch backend and
    cannot be used with the TensorFlow or JAX backends.

    Args:
        module: `torch.nn.Module` instance. If it's a `LazyModule`
            instance, then its parameters must be initialized before
            passing the instance to `TorchModuleWrapper` (e.g. by calling
            it once).
        output_shape: The shape of the output of this layer. It helps
            Keras perform automatic shape inference.
        name: The name of the layer (string).

    Example:

    Here's an example of how the `TorchModuleWrapper` can be used with
    vanilla PyTorch modules.

    ```python
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    import keras
    from keras.layers import TorchModuleWrapper

    class Classifier(keras.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Wrap `torch.nn.Module`s with `TorchModuleWrapper`
            # if they contain parameters
            self.conv1 = TorchModuleWrapper(
                nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))
            )
            self.conv2 = TorchModuleWrapper(
                nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
            )
            self.pool = nn.MaxPool2d(kernel_size=(2, 2))
            self.flatten = nn.Flatten()
            self.dropout = nn.Dropout(p=0.5)
            self.fc = TorchModuleWrapper(nn.Linear(1600, 10))

        def call(self, inputs):
            x = F.relu(self.conv1(inputs))
            x = self.pool(x)
            x = F.relu(self.conv2(x))
            x = self.pool(x)
            x = self.flatten(x)
            x = self.dropout(x)
            x = self.fc(x)
            return F.softmax(x, dim=1)

    model = Classifier()
    model.build((1, 28, 28))
    print("Output shape:", model(torch.ones(1, 1, 28, 28).to("cuda")).shape)

    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="adam",
        metrics=["accuracy"]
    )
    model.fit(train_loader, epochs=5)
    ```
    """

    def __init__(self, module, name=None, output_shape=None, **kwargs):
        super().__init__(name=name, **kwargs)
        import torch.nn as nn

        from keras.src.backend.torch.core import get_device

        # Lazy modules have parameters of unknown shape until first call;
        # they cannot be tracked, so reject them up front.
        if (
            isinstance(module, nn.modules.lazy.LazyModuleMixin)
            and module.has_uninitialized_params()
        ):
            raise ValueError(
                "LazyModules are not supported unless they "
                "are already initialized. "
                f"Received uninitialized LazyModule: module={module}"
            )

        self.module = module.to(get_device())
        self._track_module_parameters()
        self.output_shape = output_shape

    def parameters(self, recurse=True):
        # Mirror the torch API so the wrapper can be treated like a module.
        return self.module.parameters(recurse=recurse)

    def _track_module_parameters(self):
        # Wrap each torch parameter in a Keras Variable so that Keras
        # optimizers and saving machinery can see them.
        for param in self.module.parameters():
            # The Variable will reuse the raw `param`
            # and simply wrap it.
            variable = backend.Variable(
                initializer=param, trainable=param.requires_grad
            )
            self._track_variable(variable)
        self.built = True

    def call(self, *args, training=None, **kwargs):
        # Propagate Keras's training flag to torch's train/eval modes.
        if training is False:
            self.eval()
        else:
            self.train()
        return self.module(*args, **kwargs)

    def save_own_variables(self, store):
        """Saves model's state from `state_dict`.

        `model.parameters` excludes some of model's state like
        `BatchNorm` mean and variance. So, use `state_dict` to obtain
        all of model's state.
        """
        state_dict = self.module.state_dict()
        for key in state_dict.keys():
            store[key] = convert_to_numpy(state_dict[key])

    def load_own_variables(self, store):
        """Loads model's state via `state_dict`."""
        state_dict = {}
        for key in store.keys():
            # Keys can come back as bytes from some storage backends.
            if isinstance(key, bytes):
                key = key.decode()
            state_dict[key] = convert_to_tensor(store[key])
        self.module.load_state_dict(state_dict)

    def compute_output_shape(self, input_shape):
        # Fall back to Keras's generic inference when no explicit
        # output_shape was given at construction time.
        if self.output_shape is None:
            return super().compute_output_shape(input_shape)
        return self.output_shape

    def get_config(self):
        base_config = super().get_config()
        import torch

        # Serialize the full module with torch.save, then base64-encode
        # the bytes so the config stays JSON-safe.
        buffer = io.BytesIO()
        torch.save(self.module, buffer)
        # Encode the buffer using base64 to ensure safe serialization
        buffer_b64 = base64.b64encode(buffer.getvalue()).decode("ascii")
        config = {
            "module": buffer_b64,
            "output_shape": self.output_shape,
        }
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        import torch

        if "module" in config:
            # torch.load can execute arbitrary code; only allow it when
            # the user has explicitly opted out of safe mode.
            if in_safe_mode():
                raise ValueError(
                    "Requested the deserialization of a `torch.nn.Module` "
                    "object via `torch.load()`. This carries a potential risk "
                    "of arbitrary code execution and thus it is disallowed by "
                    "default. If you trust the source of the artifact, you can "
                    "override this error by passing `safe_mode=False` to the "
                    "loading function, or calling "
                    "`keras.config.enable_unsafe_deserialization()."
                )
            # Decode the base64 string back to bytes
            buffer_bytes = base64.b64decode(config["module"].encode("ascii"))
            buffer = io.BytesIO(buffer_bytes)
            config["module"] = torch.load(buffer, weights_only=False)
        return cls(**config)
def no_grad(orig_func):
    """Wrap `orig_func` in `torch.no_grad` on torch >= 2.1.

    On older torch versions the function is returned unwrapped,
    matching the version gate used here.
    """
    import torch

    if parse(torch.__version__) < parse("2.1.0"):
        return orig_func
    return torch.no_grad(orig_func)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/naming.py | keras/src/utils/naming.py | import collections
import re
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def auto_name(prefix):
    """Generate a unique snake_case name derived from `prefix`."""
    return uniquify(to_snake_case(prefix))
def uniquify(name):
    """Return `name`, suffixed with a counter if it was seen before.

    The first occurrence of a name is returned unchanged; subsequent
    occurrences get `_<count>` appended. Counters live in global state
    so they survive across calls until `reset_uids` is invoked.
    """
    counters = global_state.get_global_attribute(
        "object_name_uids",
        default=collections.defaultdict(int),
        set_to_default=True,
    )
    if name in counters:
        unique_name = f"{name}_{counters[name]}"
    else:
        unique_name = name
    counters[name] += 1
    return unique_name
def to_snake_case(name):
    """Convert `name` to snake_case, dropping non-alphanumeric characters."""
    # Strip every non-word character first, then break CamelCase runs apart
    # with underscores before lowercasing the whole string.
    stripped = re.sub(r"\W+", "", name)
    spaced = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", stripped)
    return re.sub("([a-z])([A-Z])", r"\1_\2", spaced).lower()
@keras_export("keras.backend.get_uid")
def get_uid(prefix=""):
    """Associates a string prefix with an integer counter.

    Args:
        prefix: String prefix to index.

    Returns:
        Unique integer ID.

    Example:

    >>> get_uid('dense')
    1
    >>> get_uid('dense')
    2
    """
    # Counters are shared with `uniquify` through the same global-state
    # attribute, so both naming helpers stay in sync.
    counters = global_state.get_global_attribute(
        "object_name_uids",
        default=collections.defaultdict(int),
        set_to_default=True,
    )
    counters[prefix] += 1
    return counters[prefix]
def reset_uids():
    """Reset every name/uid counter tracked in global state."""
    fresh_counters = collections.defaultdict(int)
    global_state.set_global_attribute("object_name_uids", fresh_counters)
def get_object_name(obj):
    """Return a snake_case display name for an arbitrary object.

    Preference order: the object's `name` attribute (most Keras objects),
    its `__name__` (functions), its class name (other instances), and
    finally its string representation.
    """
    if hasattr(obj, "name"):  # Most Keras objects.
        return obj.name
    if hasattr(obj, "__name__"):  # Function.
        return to_snake_case(obj.__name__)
    if hasattr(obj, "__class__"):  # Class instance.
        return to_snake_case(obj.__class__.__name__)
    return to_snake_case(str(obj))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/audio_dataset_utils_test.py | keras/src/utils/audio_dataset_utils_test.py | import os
import numpy as np
from keras.src import testing
from keras.src.utils import audio_dataset_utils
from keras.src.utils.module_utils import tensorflow as tf
class AudioDatasetFromDirectoryTest(testing.TestCase):
    """Tests for `audio_dataset_utils.audio_dataset_from_directory`.

    Each test synthesizes a temporary directory of random mono WAV files
    (one subdirectory per class) and checks the shapes, dtypes, labels,
    splitting, and error behavior of the resulting dataset.
    """

    def _get_audio_samples(self, count=16, different_sequence_lengths=False):
        # Generate `count` encoded WAV blobs of random mono audio. When
        # `different_sequence_lengths` is set, lengths vary in
        # [10, sequence_length]; otherwise all clips are `sequence_length`
        # samples long.
        sequence_length = 30
        num_channels = 1
        audio_samples = []
        for _ in range(count):
            if different_sequence_lengths:
                random_sequence_length = np.random.randint(
                    10, sequence_length + 1
                )
                audio = np.random.random((random_sequence_length, num_channels))
            else:
                audio = np.random.random((sequence_length, num_channels))
            audio_samples.append(tf.audio.encode_wav(audio, 1000))
        return audio_samples

    def _prepare_directory(
        self,
        num_classes=2,
        nested_dirs=False,
        count=16,
        different_sequence_lengths=False,
    ):
        # Build a temp directory with `num_classes` class subdirectories
        # (optionally with nested subfolders) and distribute `count` WAV
        # files round-robin across them. Returns the directory path.
        # Get a unique temp directory
        temp_dir = self.get_temp_dir()
        # Generate paths to class subdirectories
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths
        # Save audio samples to the paths
        i = 0
        for audio in self._get_audio_samples(
            count=count, different_sequence_lengths=different_sequence_lengths
        ):
            path = paths[i % len(paths)]
            ext = "wav"
            filename = os.path.join(path, f"audio_{i}.{ext}")
            with open(os.path.join(temp_dir, filename), "wb") as f:
                f.write(audio.numpy())
            i += 1
        return temp_dir

    def test_audio_dataset_from_directory_standalone(self):
        # Test retrieving audio samples without labels from a directory and its
        # subdirs.
        # Save a few extra audio in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i, audio in enumerate(self._get_audio_samples(3)):
            filename = f"audio_{i}.wav"
            with open(os.path.join(directory, filename), "wb") as f:
                f.write(audio.numpy())
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=5, output_sequence_length=30, labels=None
        )
        batch = next(iter(dataset))
        # We return plain audio
        self.assertEqual(batch.shape, (5, 30, 1))
        self.assertEqual(batch.dtype.name, "float32")
        # Count samples: 7 class files + 3 root files = 10, in 2 batches of 5.
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)

    def test_audio_dataset_from_directory_binary(self):
        # Check (audio, label) structure for the "int", "binary" and
        # "categorical" label modes with 2 classes.
        directory = self._prepare_directory(num_classes=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            label_mode="binary",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 1))
        self.assertEqual(batch[1].dtype.name, "float32")
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 2))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_static_shape_in_graph(self):
        # Inside a tf.function, the batch and channel dims are dynamic but
        # the sequence length (30) must be statically known.
        directory = self._prepare_directory(num_classes=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode="int"
        )
        test_case = self

        @tf.function
        def symbolic_fn(ds):
            for x, _ in ds.take(1):
                test_case.assertListEqual(x.shape.as_list(), [None, 30, None])

        symbolic_fn(dataset)

    def test_sample_count(self):
        # All files are yielded even when the count isn't a batch multiple.
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode=None
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)

    def test_audio_dataset_from_directory_multiclass(self):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode=None
        )
        batch = next(iter(dataset))
        self.assertEqual(batch.shape, (8, 30, 1))
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode=None
        )
        sample_count = 0
        iterator = iter(dataset)
        # NOTE(review): this drives a second iterator in lockstep with the
        # `for` loop; both traverse the same batches so the total matches,
        # but a single loop over `dataset` would be clearer.
        for batch in dataset:
            sample_count += next(iterator).shape[0]
        self.assertEqual(sample_count, 15)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=8, output_sequence_length=30, label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 4))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_audio_dataset_from_directory_validation_split(self):
        # With 10 files and a 0.2 split: 8 training samples, 2 validation.
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=10,
            output_sequence_length=30,
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=10,
            output_sequence_length=30,
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2, 30, 1))

    def test_audio_dataset_from_directory_manual_labels(self):
        # Explicit `labels` list (with shuffle off) must be preserved
        # in file order.
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            labels=[0, 1],
            shuffle=False,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertAllClose(batch[1], [0, 1])

    def test_audio_dataset_from_directory_follow_links(self):
        # Files in nested subfolders are picked up when follow_links=True.
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            label_mode=None,
            follow_links=True,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 25)

    def test_audio_dataset_from_directory_no_audio(self):
        # An empty directory tree is an error, not an empty dataset.
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(
            ValueError, "No audio files found in directory"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(directory)

    def test_audio_dataset_from_directory_ragged(self):
        # ragged=True keeps per-sample lengths: time and channel dims are
        # dynamic in the batched element spec.
        directory = self._prepare_directory(
            num_classes=2, count=16, different_sequence_lengths=True
        )
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, ragged=True, batch_size=8
        )
        batch = next(iter(dataset))
        self.assertEqual(batch[0].shape.as_list(), [8, None, None])

    def test_audio_dataset_from_directory_no_output_sequence_length_no_ragged(
        self,
    ):
        # This test case tests `audio_dataset_from_directory` when `ragged` and
        # `output_sequence_length` are not passed while the input sequence
        # lengths are different.
        directory = self._prepare_directory(
            num_classes=2, count=16, different_sequence_lengths=True
        )
        # The tensor shapes are different and output_sequence_length is None
        # should work fine and pad each sequence to the length of the longest
        # sequence in it's batch
        min_sequence_length, max_sequence_length = 10, 30
        possible_sequence_lengths = [
            i for i in range(min_sequence_length, max_sequence_length + 1)
        ]
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=2
        )
        sequence_lengths = list(set([b.shape[1] for b, _ in dataset]))
        for seq_len in sequence_lengths:
            self.assertIn(seq_len, possible_sequence_lengths)

    def test_audio_dataset_from_directory_no_output_sequence_length_same_lengths(  # noqa: E501
        self,
    ):
        # This test case tests `audio_dataset_from_directory` when `ragged` and
        # `output_sequence_length` are not passed while the input sequence
        # lengths are the same
        directory = self._prepare_directory(
            num_classes=2, count=16, different_sequence_lengths=False
        )
        # The tensor shapes are different and output_sequence_length is None
        # should work fine and pad each sequence to the length of the longest
        # sequence in it's batch
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, batch_size=2
        )
        sequence_lengths = list(set([batch[0].shape[1] for batch in dataset]))
        self.assertEqual(len(sequence_lengths), 1)

    def test_audio_dataset_from_directory_errors(self):
        # Exercise every argument-validation error message in turn.
        directory = self._prepare_directory(num_classes=3, count=5)

        with self.assertRaisesRegex(
            ValueError, "`sampling_rate` should be higher than 0. Received:"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                ragged=False,
                output_sequence_length=10,
                sampling_rate=-1,
            )

        with self.assertRaisesRegex(
            ValueError,
            "`sampling_rate` should have an integer value. Received:",
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                ragged=False,
                output_sequence_length=10,
                sampling_rate=1.2,
            )

        # Only run this test case when we don't have tensorflow_io.
        try:
            import tensorflow_io  # noqa: F401
        except ImportError:
            with self.assertRaisesRegex(
                ImportError,
                "To use the argument `sampling_rate`.*tensorflow_io.*",
            ):
                _ = audio_dataset_utils.audio_dataset_from_directory(
                    directory,
                    ragged=False,
                    output_sequence_length=10,
                    sampling_rate=44100,
                )

        with self.assertRaisesRegex(
            ValueError, "Cannot set both `ragged` and `output_sequence_length`"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, ragged=True, output_sequence_length=30
            )

        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, labels="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, label_mode="other"
            )

        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
            )

        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, labels=[0, 0, 1, 1]
            )

        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"]
            )

        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, label_mode="binary"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=2
            )

        with self.assertRaisesRegex(
            ValueError, '`subset` must be either "training",'
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.2, subset="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.0, subset="training"
            )

        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.2, subset="training"
            )

    def test_audio_dataset_from_directory_not_batched(self):
        # batch_size=None yields individual (time, channels) samples.
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=None,
            output_sequence_length=30,
            label_mode=None,
            shuffle=False,
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 2)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/file_utils.py | keras/src/utils/file_utils.py | import hashlib
import os
import re
import shutil
import sys
import tarfile
import tempfile
import urllib
import urllib.error
import urllib.parse
import warnings
import zipfile
from urllib.request import urlretrieve
from keras.src.api_export import keras_export
from keras.src.backend import config
from keras.src.utils import io_utils
from keras.src.utils.module_utils import gfile
from keras.src.utils.progbar import Progbar
def path_to_string(path):
    """Convert `PathLike` objects to their string representation.

    Non-path objects — including plain strings and file objects — are
    passed through unchanged, allowing transparent passthrough.

    Args:
        path: `PathLike` object that represents a path.

    Returns:
        The string form of `path` if it is an `os.PathLike`, otherwise
        `path` itself, unchanged.
    """
    return os.fspath(path) if isinstance(path, os.PathLike) else path
def resolve_path(path):
    """Return `path` as an absolute path with symlinks resolved."""
    absolute = os.path.abspath(path)
    return os.path.realpath(absolute)
def is_path_in_dir(path, base_dir):
    """Whether joining `path` onto `base_dir` stays inside `base_dir`.

    Used to reject archive members that would escape the extraction
    directory (e.g. via `..` components or absolute paths).
    """
    candidate = resolve_path(os.path.join(base_dir, path))
    return candidate.startswith(base_dir)
def is_link_in_dir(info, base):
    """Whether a tar link member's target stays inside the extraction dir.

    The link target is interpreted relative to the directory containing
    the link itself, so that directory is resolved first.
    """
    link_dir = resolve_path(os.path.join(base, os.path.dirname(info.name)))
    return is_path_in_dir(info.linkname, base_dir=link_dir)
def filter_safe_zipinfos(members):
    """Yield only zip members whose paths stay inside the extraction dir.

    Members whose resolved path would escape the current directory (e.g.
    via `..` components or absolute paths) are skipped with a warning.

    Args:
        members: Iterable of `zipfile.ZipInfo` objects.

    Yields:
        The safe `ZipInfo` members.
    """
    base_dir = resolve_path(".")
    for finfo in members:
        valid_path = False
        if is_path_in_dir(finfo.filename, base_dir):
            valid_path = True
            yield finfo
        if not valid_path:
            # Bug fix: `zipfile.ZipInfo` has no `name` attribute (that is
            # `tarfile.TarInfo`); referencing `finfo.name` here raised
            # AttributeError instead of emitting the warning. Use
            # `finfo.filename`, the attribute checked above.
            warnings.warn(
                "Skipping invalid path during archive extraction: "
                f"'{finfo.filename}'.",
                stacklevel=2,
            )
def filter_safe_tarinfos(members):
    """Yield only tar members that extract inside the current directory.

    Symlink/hardlink members are additionally checked so their link
    targets cannot escape the extraction directory. Unsafe members are
    skipped with a warning.

    Args:
        members: Iterable of `tarfile.TarInfo` objects.

    Yields:
        The safe `TarInfo` members.
    """
    base_dir = resolve_path(".")
    for finfo in members:
        if finfo.issym() or finfo.islnk():
            # Links: validate the link target, not just the member path.
            safe = is_link_in_dir(finfo, base_dir)
        else:
            safe = is_path_in_dir(finfo.name, base_dir)
        if safe:
            yield finfo
        else:
            warnings.warn(
                "Skipping invalid path during archive extraction: "
                f"'{finfo.name}'.",
                stacklevel=2,
            )
def extract_open_archive(archive, path="."):
    """Extracts an open tar or zip archive to the provided directory.

    Member paths are filtered during extraction so unsafe entries
    (path traversal, escaping links) are skipped.

    Args:
        archive: The archive object, either a `TarFile` or a `ZipFile`.
        path: Where to extract the archive file.
    """
    if isinstance(archive, zipfile.ZipFile):
        # Zip archive.
        safe_members = filter_safe_zipinfos(archive.infolist())
        archive.extractall(path, members=safe_members)
        return
    # Tar archive. The `filter="data"` option was added in Python 3.12 and
    # became the default starting from Python 3.14, so we only pass it
    # explicitly in between those two versions.
    extractall_kwargs = {}
    if (3, 12) <= sys.version_info < (3, 14):
        extractall_kwargs["filter"] = "data"
    archive.extractall(
        path, members=filter_safe_tarinfos(archive), **extractall_kwargs
    )
def extract_archive(file_path, path=".", archive_format="auto"):
    """Extracts an archive if it matches a supported format.

    Supports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats.

    Args:
        file_path: Path to the archive file.
        path: Where to extract the archive file.
        archive_format: Archive format to try for extracting the file.
            Options are `"auto"`, `"tar"`, `"zip"`, and `None`.
            `"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files.
            The default `"auto"` uses `["tar", "zip"]`.
            `None` or an empty list will return no matches found.

    Returns:
        `True` if a match was found and an archive extraction was
        completed, `False` otherwise.
    """
    if archive_format is None:
        return False
    if archive_format == "auto":
        archive_format = ["tar", "zip"]
    if isinstance(archive_format, str):
        archive_format = [archive_format]

    file_path = path_to_string(file_path)
    path = path_to_string(path)

    # Map each supported format name to its (opener, detector) pair.
    handlers = {
        "tar": (tarfile.open, tarfile.is_tarfile),
        "zip": (zipfile.ZipFile, zipfile.is_zipfile),
    }
    for archive_type in archive_format:
        if archive_type not in handlers:
            raise NotImplementedError(archive_type)
        open_fn, is_match_fn = handlers[archive_type]
        if not is_match_fn(file_path):
            continue
        with open_fn(file_path) as archive:
            try:
                extract_open_archive(archive, path)
            except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                # Remove any partially-extracted output before re-raising.
                if os.path.exists(path):
                    if os.path.isfile(path):
                        os.remove(path)
                    else:
                        shutil.rmtree(path)
                raise
        return True
    return False
@keras_export("keras.utils.get_file")
def get_file(
    fname=None,
    origin=None,
    untar=False,
    md5_hash=None,
    file_hash=None,
    cache_subdir="datasets",
    hash_algorithm="auto",
    extract=False,
    archive_format="auto",
    cache_dir=None,
    force_download=False,
):
    """Downloads a file from a URL if it not already in the cache.

    By default the file at the url `origin` is downloaded to the
    cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
    and given the filename `fname`. The final location of a file
    `example.txt` would therefore be `~/.keras/datasets/example.txt`.
    Files in `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats can
    also be extracted.

    Passing a hash will verify the file after download. The command line
    programs `shasum` and `sha256sum` can compute the hash.

    Example:

    ```python
    path_to_downloaded_file = get_file(
        origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
        extract=True
    )
    ```

    Args:
        fname: If the target is a single file, this is your desired
            local name for the file.
            If `None`, the name of the file at `origin` will be used.
            If downloading and extracting a directory archive,
            the provided `fname` will be used as extraction directory
            name (only if it doesn't have an extension).
        origin: Original URL of the file.
        untar: Deprecated in favor of `extract` argument.
            Boolean, whether the file is a tar archive that should
            be extracted.
        md5_hash: Deprecated in favor of `file_hash` argument.
            md5 hash of the file for file integrity verification.
        file_hash: The expected hash string of the file after download.
            The sha256 and md5 hash algorithms are both supported.
        cache_subdir: Subdirectory under the Keras cache dir where the file is
            saved. If an absolute path, e.g. `"/path/to/folder"` is
            specified, the file will be saved at that location.
        hash_algorithm: Select the hash algorithm to verify the file.
            options are `"md5'`, `"sha256'`, and `"auto'`.
            The default 'auto' detects the hash algorithm in use.
        extract: If `True`, extracts the archive. Only applicable to compressed
            archive files like tar or zip.
        archive_format: Archive format to try for extracting the file.
            Options are `"auto'`, `"tar'`, `"zip'`, and `None`.
            `"tar"` includes tar, tar.gz, and tar.bz files.
            The default `"auto"` corresponds to `["tar", "zip"]`.
            None or an empty list will return no matches found.
        cache_dir: Location to store cached files, when None it
            defaults ether `$KERAS_HOME` if the `KERAS_HOME` environment
            variable is set or `~/.keras/`.
        force_download: If `True`, the file will always be re-downloaded
            regardless of the cache state.

    Returns:
        Path to the downloaded file.

    **⚠️ Warning on malicious downloads ⚠️**

    Downloading something from the Internet carries a risk.
    NEVER download a file/archive if you do not trust the source.
    We recommend that you specify the `file_hash` argument
    (if the hash of the source file is known) to make sure that the file you
    are getting is the one you expect.
    """
    if origin is None:
        raise ValueError(
            'Please specify the "origin" argument (URL of the file '
            "to download)."
        )

    if cache_dir is None:
        cache_dir = config.keras_home()
    # Legacy `md5_hash` argument maps onto `file_hash` with a forced
    # "md5" algorithm.
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = "md5"
    datadir_base = os.path.expanduser(cache_dir)
    if not os.access(datadir_base, os.W_OK):
        # Cache dir is not writable (e.g. read-only home): fall back to a
        # `.keras` folder under the system temp directory.
        datadir_base = os.path.join(
            "/tmp" if os.path.isdir("/tmp") else tempfile.gettempdir(), ".keras"
        )
    datadir = os.path.join(datadir_base, cache_subdir)
    os.makedirs(datadir, exist_ok=True)

    # Remember whether the caller provided a name; this changes how the
    # extraction directory is derived below.
    provided_fname = fname
    fname = path_to_string(fname)

    if not fname:
        # Derive the file name from the URL path.
        fname = os.path.basename(urllib.parse.urlsplit(origin).path)
        if not fname:
            raise ValueError(
                "Can't parse the file name from the origin provided: "
                f"'{origin}'."
                "Please specify the `fname` argument."
            )
    else:
        if os.sep in fname:
            raise ValueError(
                "Paths are no longer accepted as the `fname` argument. "
                "To specify the file's parent directory, use "
                f"the `cache_dir` argument. Received: fname={fname}"
            )

    if extract or untar:
        # When extracting, the archive and the extraction directory get
        # distinct paths so both can coexist in the cache.
        if provided_fname:
            if "." in fname:
                # `fname` has an extension: treat it as the archive name and
                # extract next to it under `<stem>_extracted`.
                download_target = os.path.join(datadir, fname)
                fname = fname[: fname.find(".")]
                extraction_dir = os.path.join(datadir, f"{fname}_extracted")
            else:
                # Extension-less `fname` names the extraction directory.
                extraction_dir = os.path.join(datadir, fname)
                download_target = os.path.join(datadir, f"{fname}_archive")
        else:
            extraction_dir = os.path.join(datadir, fname)
            download_target = os.path.join(datadir, f"{fname}_archive")
    else:
        download_target = os.path.join(datadir, fname)

    if force_download:
        download = True
    elif os.path.exists(download_target):
        # File found in cache.
        download = False
        # Verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(
                download_target, file_hash, algorithm=hash_algorithm
            ):
                io_utils.print_msg(
                    "A local file was found, but it seems to be "
                    f"incomplete or outdated because the {hash_algorithm} "
                    "file hash does not match the original value of "
                    f"{file_hash} so we will re-download the data."
                )
                download = True
    else:
        download = True

    if download:
        io_utils.print_msg(f"Downloading data from {origin}")

        class DLProgbar:
            """Manage progress bar state for use in urlretrieve."""

            def __init__(self):
                self.progbar = None
                self.finished = False

            def __call__(self, block_num, block_size, total_size):
                # urlretrieve reports -1 when the server sends no
                # Content-Length; treat that as unknown size.
                if total_size == -1:
                    total_size = None
                if not self.progbar:
                    self.progbar = Progbar(total_size)
                current = block_num * block_size
                if total_size is None:
                    self.progbar.update(current)
                else:
                    if current < total_size:
                        self.progbar.update(current)
                    elif not self.finished:
                        # Clamp the final update to the target so the bar
                        # completes exactly once.
                        self.progbar.update(self.progbar.target)
                        self.finished = True

        error_msg = "URL fetch failure on {}: {} -- {}"
        try:
            try:
                urlretrieve(origin, download_target, DLProgbar())
            except urllib.error.HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except urllib.error.URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # Never leave a partial download in the cache.
            if os.path.exists(download_target):
                os.remove(download_target)
            raise

        # Validate download if succeeded and user provided an expected hash
        # Security conscious users would get the hash of the file from a
        # separate channel and pass it to this API to prevent MITM / corruption:
        if os.path.exists(download_target) and file_hash is not None:
            if not validate_file(
                download_target, file_hash, algorithm=hash_algorithm
            ):
                raise ValueError(
                    "Incomplete or corrupted file detected. "
                    f"The {hash_algorithm} "
                    "file hash does not match the provided value "
                    f"of {file_hash}."
                )

    if extract or untar:
        if untar:
            archive_format = "tar"
        status = extract_archive(
            download_target, extraction_dir, archive_format
        )
        if not status:
            warnings.warn("Could not extract archive.", stacklevel=2)
        # When extracting, the extraction directory (not the archive) is
        # the user-visible result.
        return extraction_dir

    return download_target
def resolve_hasher(algorithm, file_hash=None):
    """Return a new hashlib hasher matching `algorithm`.

    `"auto"` picks sha256 when `file_hash` looks like a sha256 digest
    (64 characters); any other case falls back to md5, which is kept
    only for legacy purposes.
    """
    wants_sha256 = algorithm == "sha256" or (
        algorithm == "auto" and file_hash is not None and len(file_hash) == 64
    )
    return hashlib.sha256() if wants_sha256 else hashlib.md5()
def hash_file(fpath, algorithm="sha256", chunk_size=65535):
    """Calculates a file sha256 or md5 hash.

    Example:

    >>> hash_file('/path/to/file.zip')
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'

    Args:
        fpath: Path to the file being validated.
        algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`
            (the default `"auto"` detects the hash algorithm in use), or an
            already-constructed hashlib hasher object.
        chunk_size: Bytes to read at a time, important for large files.

    Returns:
        The file hash (hex digest string).
    """
    if isinstance(algorithm, str):
        hasher = resolve_hasher(algorithm)
    else:
        hasher = algorithm
    # Stream the file in chunks so large files never need to fit in memory.
    with open(fpath, "rb") as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535):
    """Validates a file against a sha256 or md5 hash.

    Args:
        fpath: path to the file being validated
        file_hash: The expected hash string of the file.
            The sha256 and md5 hash algorithms are both supported.
        algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
            The default `"auto"` detects the hash algorithm in use.
        chunk_size: Bytes to read at a time, important for large files.

    Returns:
        Boolean, whether the file is valid.
    """
    hasher = resolve_hasher(algorithm, file_hash)
    # Idiom fix: return the comparison directly instead of the
    # `if ...: return True / else: return False` anti-pattern. Both sides
    # are normalized through `str()` so mixed digest representations
    # still compare equal.
    return str(hash_file(fpath, hasher, chunk_size)) == str(file_hash)
def is_remote_path(filepath):
    """
    Determines if a given filepath indicates a remote location.

    This function checks if the filepath represents a known remote pattern
    such as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`), Placer
    (`/placer`), TFHub (`/tfhub`), or a URL (`.*://`).

    Args:
        filepath (str): The path to be checked.

    Returns:
        bool: True if the filepath is a recognized remote path, otherwise
            False.
    """
    # Idiom fix: return the boolean directly instead of the
    # `if match: return True / return False` anti-pattern.
    return (
        re.match(
            r"^(/cns|/cfs|/gcs|/hdfs|/readahead|/placer|/tfhub|.*://).*$",
            str(filepath),
        )
        is not None
    )
# Below are gfile-replacement utils.
def _raise_if_no_gfile(path):
    """Raise a `ValueError` stating that gfile (TensorFlow) is required."""
    message = (
        "Handling remote paths requires installing TensorFlow "
        f"(in order to use gfile). Received path: {path}"
    )
    raise ValueError(message)
def exists(path):
    """Like `os.path.exists`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return os.path.exists(path)
    if gfile.available:
        return gfile.exists(path)
    _raise_if_no_gfile(path)
def File(path, mode="r"):
    """Like the builtin `open`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return open(path, mode=mode)
    if gfile.available:
        return gfile.GFile(path, mode=mode)
    _raise_if_no_gfile(path)
def join(path, *paths):
    """Like `os.path.join`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return os.path.join(path, *paths)
    if gfile.available:
        return gfile.join(path, *paths)
    _raise_if_no_gfile(path)
def isdir(path):
    """Like `os.path.isdir`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return os.path.isdir(path)
    if gfile.available:
        return gfile.isdir(path)
    _raise_if_no_gfile(path)
def remove(path):
    """Like `os.remove`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return os.remove(path)
    if gfile.available:
        return gfile.remove(path)
    _raise_if_no_gfile(path)
def rmtree(path):
    """Like `shutil.rmtree`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return shutil.rmtree(path)
    if gfile.available:
        return gfile.rmtree(path)
    _raise_if_no_gfile(path)
def listdir(path):
    """Like `os.listdir`, delegating remote paths to gfile."""
    if not is_remote_path(path):
        return os.listdir(path)
    if gfile.available:
        return gfile.listdir(path)
    _raise_if_no_gfile(path)
def copy(src, dst):
    """Like `shutil.copy`, delegating to gfile when either path is remote."""
    if not (is_remote_path(src) or is_remote_path(dst)):
        return shutil.copy(src, dst)
    if gfile.available:
        return gfile.copy(src, dst, overwrite=True)
    _raise_if_no_gfile(f"src={src} dst={dst}")
def makedirs(path):
    """Like `os.makedirs`, delegating remote paths to gfile.

    Fix: a stray, no-op string literal expression (`"/fo"`) trailing this
    function at module level has been removed — it was dead code with no
    effect.
    """
    if not is_remote_path(path):
        return os.makedirs(path)
    if gfile.available:
        return gfile.makedirs(path)
    _raise_if_no_gfile(path)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/image_dataset_utils.py | keras/src/utils/image_dataset_utils.py | import io
import pathlib
import numpy as np
from keras.src.api_export import keras_export
from keras.src.backend.config import standardize_data_format
from keras.src.utils import dataset_utils
from keras.src.utils import image_utils
from keras.src.utils.grain_utils import make_batch
from keras.src.utils.module_utils import grain
from keras.src.utils.module_utils import tensorflow as tf
# Pillow is an optional dependency: keep this module importable without it
# and leave `pil_image` / `pil_image_resampling` as `None`, so failures
# only occur when image decoding is actually attempted.
try:
    from PIL import Image as pil_image

    try:
        # Newer Pillow exposes resampling filters under `Image.Resampling`.
        pil_image_resampling = pil_image.Resampling
    except AttributeError:
        # Older Pillow exposes the filters directly on the `Image` module.
        pil_image_resampling = pil_image
except ImportError:
    pil_image = None
    pil_image_resampling = None

# Image file extensions accepted when scanning a directory for images.
ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
@keras_export(
    [
        "keras.utils.image_dataset_from_directory",
        "keras.preprocessing.image_dataset_from_directory",
    ]
)
def image_dataset_from_directory(
    directory,
    labels="inferred",
    label_mode="int",
    class_names=None,
    color_mode="rgb",
    batch_size=32,
    image_size=(256, 256),
    shuffle=True,
    seed=None,
    validation_split=None,
    subset=None,
    interpolation="bilinear",
    follow_links=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    data_format=None,
    format="tf",
    verbose=True,
):
    """Generates a dataset from image files in a directory.

    If your directory structure is:

    ```
    main_directory/
    ...class_a/
    ......a_image_1.jpg
    ......a_image_2.jpg
    ...class_b/
    ......b_image_1.jpg
    ......b_image_2.jpg
    ```

    Then calling `image_dataset_from_directory(main_directory,
    labels='inferred')` will return a dataset that yields batches of
    images from the subdirectories `class_a` and `class_b`, together with labels
    0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

    Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`.
    Animated gifs are truncated to the first frame.

    By default, this function will return a `tf.data.Dataset` object. You can
    set `format="grain"` to return a `grain.IterDataset` object instead, which
    removes the TensorFlow dependency.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing images for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number of
            image files found in the directory. Labels should be sorted
            according to the alphanumeric order of the image file paths
            (obtained via `os.walk(directory)` in Python).
        label_mode: String describing the encoding of `labels`. Options are:
            - `"int"`: means that the labels are encoded as integers
                (e.g. for `sparse_categorical_crossentropy` loss).
            - `"categorical"` means that the labels are
                encoded as a categorical vector
                (e.g. for `categorical_crossentropy` loss).
            - `"binary"` means that the labels (there can be only 2)
                are encoded as `float32` scalars with values 0 or 1
                (e.g. for `binary_crossentropy`).
            - `None` (no labels).
        class_names: Only valid if `labels` is `"inferred"`.
            This is the explicit list of class names
            (must match names of subdirectories). Used to control the order
            of the classes (otherwise alphanumerical order is used).
        color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`.
            Whether the images will be converted to
            have 1, 3, or 4 channels. Defaults to `"rgb"`.
        batch_size: Size of the batches of data. Defaults to 32.
            If `None`, the data will not be batched
            (the dataset will yield individual samples).
        image_size: Size to resize images to after they are read from disk,
            specified as `(height, width)`.
            Since the pipeline processes batches of images that must all have
            the same size, this must be provided. Defaults to `(256, 256)`.
        shuffle: Whether to shuffle the data. Defaults to `True`.
            If set to `False`, sorts the data in alphanumeric order.
        seed: Optional random seed for shuffling and transformations.
        validation_split: Optional float between 0 and 1,
            fraction of data to reserve for validation.
        subset: Subset of the data to return.
            One of `"training"`, `"validation"`, or `"both"`.
            Only used if `validation_split` is set.
            When `subset="both"`, the utility returns a tuple of two datasets
            (the training and validation datasets respectively).
        interpolation: String, the interpolation method used when
            resizing images.
            Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
            `"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
            Defaults to `"bilinear"`.
        follow_links: Whether to visit subdirectories pointed to by symlinks.
            Defaults to `False`.
        crop_to_aspect_ratio: If `True`, resize the images without aspect
            ratio distortion. When the original aspect ratio differs from the
            target aspect ratio, the output image will be cropped so as to
            return the largest possible window in the image
            (of size `image_size`) that matches the target aspect ratio. By
            default (`crop_to_aspect_ratio=False`), aspect ratio may not be
            preserved.
        pad_to_aspect_ratio: If `True`, resize the images without aspect
            ratio distortion. When the original aspect ratio differs from the
            target aspect ratio, the output image will be padded so as to
            return the largest possible window in the image
            (of size `image_size`) that matches the target aspect ratio. By
            default (`pad_to_aspect_ratio=False`), aspect ratio may not be
            preserved.
        data_format: If None uses keras.config.image_data_format()
            otherwise either 'channel_last' or 'channel_first'.
        format: The format of the return object. Defaults to `"tf"`. Available
            options are:
            - `"tf"`: returns a `tf.data.Dataset` object. Requires
                TensorFlow to be installed.
            - `"grain"`: returns a `grain.IterDataset` object. Requires
                Grain to be installed.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.

    Returns:

    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
    (`format="grain"`) object.

    - If `label_mode` is `None`, it yields `float32` tensors of shape
        `(batch_size, image_size[0], image_size[1], num_channels)`,
        encoding images (see below for rules regarding `num_channels`).
    - Otherwise, it yields a tuple `(images, labels)`, where `images` has
        shape `(batch_size, image_size[0], image_size[1], num_channels)`,
        and `labels` follows the format described below.

    Rules regarding labels format:

    - if `label_mode` is `"int"`, the labels are an `int32` tensor of shape
        `(batch_size,)`.
    - if `label_mode` is `"binary"`, the labels are a `float32` tensor of
        1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `"categorical"`, the labels are a `float32` tensor
        of shape `(batch_size, num_classes)`, representing a one-hot
        encoding of the class index.

    Rules regarding number of channels in the yielded images:

    - if `color_mode` is `"grayscale"`,
        there's 1 channel in the image tensors.
    - if `color_mode` is `"rgb"`,
        there are 3 channels in the image tensors.
    - if `color_mode` is `"rgba"`,
        there are 4 channels in the image tensors.
    """
    # --- Argument validation ---------------------------------------------
    if labels not in ("inferred", None):
        if not isinstance(labels, (list, tuple)):
            raise ValueError(
                "`labels` argument should be a list/tuple of integer labels, "
                "of the same size as the number of image files in the target "
                "directory. If you wish to infer the labels from the "
                "subdirectory "
                'names in the target directory, pass `labels="inferred"`. '
                "If you wish to get a dataset that only contains images "
                f"(no labels), pass `labels=None`. Received: labels={labels}"
            )
        if class_names:
            raise ValueError(
                "You can only pass `class_names` if "
                f'`labels="inferred"`. Received: labels={labels}, and '
                f"class_names={class_names}"
            )
    if label_mode not in {"int", "categorical", "binary", None}:
        raise ValueError(
            '`label_mode` argument must be one of "int", '
            '"categorical", "binary", '
            f"or None. Received: label_mode={label_mode}"
        )
    # Disabling either `labels` or `label_mode` disables both.
    if labels is None or label_mode is None:
        labels = None
        label_mode = None
    # Map the color mode onto the channel count used when decoding images.
    if color_mode == "rgb":
        num_channels = 3
    elif color_mode == "rgba":
        num_channels = 4
    elif color_mode == "grayscale":
        num_channels = 1
    else:
        raise ValueError(
            '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
            f"Received: color_mode={color_mode}"
        )
    # A single int is accepted as a square size.
    if isinstance(image_size, int):
        image_size = (image_size, image_size)
    elif not isinstance(image_size, (list, tuple)) or not len(image_size) == 2:
        raise ValueError(
            "Invalid `image_size` value. Expected a tuple of 2 integers. "
            f"Received: image_size={image_size}"
        )
    interpolation = interpolation.lower()
    supported_interpolations = (
        "bilinear",
        "nearest",
        "bicubic",
        "area",
        "lanczos3",
        "lanczos5",
        "gaussian",
        "mitchellcubic",
    )
    if interpolation not in supported_interpolations:
        raise ValueError(
            "Argument `interpolation` should be one of "
            f"{supported_interpolations}. "
            f"Received: interpolation={interpolation}"
        )
    if format not in ("tf", "grain"):
        raise ValueError(
            '`format` should be either "tf" or "grain". '
            f"Received: format={format}"
        )
    dataset_utils.check_validation_split_arg(
        validation_split, subset, shuffle, seed
    )
    # A concrete seed is needed so train/validation splits are consistent.
    if seed is None:
        seed = np.random.randint(1e6)

    # --- Indexing: collect file paths, labels and class names ------------
    image_paths, labels, class_names = dataset_utils.index_directory(
        directory,
        labels,
        formats=ALLOWLIST_FORMATS,
        class_names=class_names,
        shuffle=shuffle,
        seed=seed,
        follow_links=follow_links,
        verbose=verbose,
    )

    if label_mode == "binary" and len(class_names) != 2:
        raise ValueError(
            'When passing `label_mode="binary"`, there must be exactly 2 '
            f"class_names. Received: class_names={class_names}"
        )

    data_format = standardize_data_format(data_format=data_format)

    if batch_size is not None:
        shuffle_buffer_size = batch_size * 8
    else:
        shuffle_buffer_size = 1024

    # --- Dataset construction --------------------------------------------
    if subset == "both":
        # Build both the training and the validation dataset and return
        # them as a two-element list.
        (
            image_paths_train,
            labels_train,
        ) = dataset_utils.get_training_or_validation_split(
            image_paths, labels, validation_split, "training"
        )
        (
            image_paths_val,
            labels_val,
        ) = dataset_utils.get_training_or_validation_split(
            image_paths, labels, validation_split, "validation"
        )
        if not image_paths_train:
            raise ValueError(
                f"No training images found in directory {directory}. "
                f"Allowed formats: {ALLOWLIST_FORMATS}"
            )
        if not image_paths_val:
            raise ValueError(
                f"No validation images found in directory {directory}. "
                f"Allowed formats: {ALLOWLIST_FORMATS}"
            )
        train_dataset = paths_and_labels_to_dataset(
            image_paths=image_paths_train,
            image_size=image_size,
            num_channels=num_channels,
            labels=labels_train,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            interpolation=interpolation,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            data_format=data_format,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
            format=format,
        )
        # Note: the validation split is never shuffled.
        val_dataset = paths_and_labels_to_dataset(
            image_paths=image_paths_val,
            image_size=image_size,
            num_channels=num_channels,
            labels=labels_val,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            interpolation=interpolation,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            data_format=data_format,
            shuffle=False,
            format=format,
        )

        if format == "tf":
            if batch_size is not None:
                train_dataset = train_dataset.batch(batch_size)
                val_dataset = val_dataset.batch(batch_size)
            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
        else:
            train_dataset = train_dataset.to_iter_dataset()
            val_dataset = val_dataset.to_iter_dataset()
            if batch_size is not None:
                train_dataset = train_dataset.batch(
                    batch_size, batch_fn=make_batch
                )
                val_dataset = val_dataset.batch(batch_size, batch_fn=make_batch)

        # Users may need to reference `class_names`.
        train_dataset.class_names = class_names
        val_dataset.class_names = class_names

        # Include file paths for images as attribute.
        train_dataset.file_paths = image_paths_train
        val_dataset.file_paths = image_paths_val
        dataset = [train_dataset, val_dataset]
    else:
        # Single dataset: either the full data, or one split of it.
        image_paths, labels = dataset_utils.get_training_or_validation_split(
            image_paths, labels, validation_split, subset
        )
        if not image_paths:
            raise ValueError(
                f"No images found in directory {directory}. "
                f"Allowed formats: {ALLOWLIST_FORMATS}"
            )

        dataset = paths_and_labels_to_dataset(
            image_paths=image_paths,
            image_size=image_size,
            num_channels=num_channels,
            labels=labels,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            interpolation=interpolation,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            data_format=data_format,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
            format=format,
        )

        if format == "tf":
            if batch_size is not None:
                dataset = dataset.batch(batch_size)
            dataset = dataset.prefetch(tf.data.AUTOTUNE)
        else:
            dataset = dataset.to_iter_dataset()
            if batch_size is not None:
                dataset = dataset.batch(batch_size, batch_fn=make_batch)

        # Users may need to reference `class_names`.
        dataset.class_names = class_names

        # Include file paths for images as attribute.
        dataset.file_paths = image_paths
    return dataset
def paths_and_labels_to_dataset(
    image_paths,
    image_size,
    num_channels,
    labels,
    label_mode,
    num_classes,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
    format="tf",
):
    """Constructs a dataset of images and labels.

    Dispatches to the `tf.data` or grain implementation based on `format`.
    """
    common_kwargs = dict(
        image_paths=image_paths,
        image_size=image_size,
        num_channels=num_channels,
        labels=labels,
        label_mode=label_mode,
        num_classes=num_classes,
        interpolation=interpolation,
        data_format=data_format,
        crop_to_aspect_ratio=crop_to_aspect_ratio,
        pad_to_aspect_ratio=pad_to_aspect_ratio,
        shuffle=shuffle,
        seed=seed,
    )
    if format == "tf":
        # Only the tf.data path supports an explicit shuffle buffer size.
        return _paths_and_labels_to_dataset_tf(
            shuffle_buffer_size=shuffle_buffer_size, **common_kwargs
        )
    if format == "grain":
        return _paths_and_labels_to_dataset_grain(**common_kwargs)
    raise ValueError(
        '`format` should be either "tf" or "grain". '
        f"Received: format={format}"
    )
def _paths_and_labels_to_dataset_tf(
    image_paths,
    image_size,
    num_channels,
    labels,
    label_mode,
    num_classes,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a `tf.data` dataset of images and labels."""
    ds = tf.data.Dataset.from_tensor_slices(image_paths)
    if label_mode:
        # Pair every path with its (encoded) label.
        label_ds = dataset_utils.labels_to_dataset_tf(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((ds, label_ds))
    if shuffle:
        ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed)
    load_args = (
        image_size,
        num_channels,
        interpolation,
        data_format,
        crop_to_aspect_ratio,
        pad_to_aspect_ratio,
    )
    if label_mode:
        mapper = lambda x, y: (_load_image_tf(x, *load_args), y)
    else:
        mapper = lambda x: _load_image_tf(x, *load_args)
    return ds.map(mapper, num_parallel_calls=tf.data.AUTOTUNE)
def _load_image_tf(
    path,
    image_size,
    num_channels,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
):
    """Load an image from a path and resize it.

    Returns a float tensor with a fully static shape of
    `(H, W, C)` (channels_last) or `(C, H, W)` (channels_first).
    """
    img = tf.io.read_file(path)
    # `expand_animations=False` truncates GIFs to their first frame.
    img = tf.image.decode_image(
        img, channels=num_channels, expand_animations=False
    )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
            " can be set to `True`."
        )
    if crop_to_aspect_ratio:
        from keras.src.backend import tensorflow as tf_backend

        # `smart_resize` receives `data_format`, so the transpose happens
        # *before* resizing when channels-first output is requested.
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
        img = image_utils.smart_resize(
            img,
            image_size,
            interpolation=interpolation,
            data_format=data_format,
            backend_module=tf_backend,
        )
    elif pad_to_aspect_ratio:
        # `tf.image.resize_with_pad` works on channels-last images, so the
        # transpose happens *after* resizing here.
        img = tf.image.resize_with_pad(
            img, image_size[0], image_size[1], method=interpolation
        )
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
    else:
        img = tf.image.resize(img, image_size, method=interpolation)
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
    # Pin a fully static shape; decode/resize ops leave dims dynamic.
    if data_format == "channels_last":
        img.set_shape((image_size[0], image_size[1], num_channels))
    else:
        img.set_shape((num_channels, image_size[0], image_size[1]))
    return img
def _paths_and_labels_to_dataset_grain(
    image_paths,
    image_size,
    num_channels,
    labels,
    label_mode,
    num_classes,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    shuffle=False,
    seed=None,
):
    """Constructs a grain dataset of images and labels."""
    ds = grain.MapDataset.source(image_paths)
    if label_mode:
        # Pair every path with its (encoded) label.
        label_ds = dataset_utils.labels_to_dataset_grain(
            labels, label_mode, num_classes
        )
        ds = grain.experimental.ZipMapDataset([ds, label_ds])
    if shuffle:
        ds = ds.shuffle(seed=seed)
    load_args = (
        image_size,
        num_channels,
        interpolation,
        data_format,
        crop_to_aspect_ratio,
        pad_to_aspect_ratio,
    )
    if label_mode:
        return ds.map(
            lambda data: (_load_image_grain(data[0], *load_args), data[1])
        )
    return ds.map(lambda x: _load_image_grain(x, *load_args))
def _load_image_grain(
    path,
    image_size,
    num_channels,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
):
    """Load an image from a path and resize it.

    Reads the file via PIL, converts it to the channel count implied by
    `num_channels`, and resizes it with backend ops on the CPU device.
    """
    # Deferred imports keep module import light for the tf-only code path.
    from keras.src import backend
    from keras.src import ops

    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. The use of `load_img` requires PIL."
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
            " can be set to `True`."
        )
    # Accept in-memory buffers as well as path-like values.
    if isinstance(path, io.BytesIO):
        img = pil_image.open(path)
    elif isinstance(path, (pathlib.Path, bytes, str)):
        if isinstance(path, pathlib.Path):
            path = str(path.resolve())
        img = pil_image.open(path)
    else:
        raise TypeError(
            f"path should be path-like or io.BytesIO, not {type(path)}"
        )
    # Convert the PIL mode so the decoded array has `num_channels` channels.
    if num_channels == 1:
        # if image is not already an 8-bit, 16-bit or 32-bit grayscale image
        # convert it to an 8-bit grayscale image.
        if img.mode not in ("L", "I;16", "I"):
            img = img.convert("L")
    elif num_channels == 4:
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif num_channels == 3:
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError(
            "num_channels must be 1, 3 or 4. "
            f"Received: num_channels={num_channels}"
        )
    # Do tensor conversion and resizing under the CPU device scope.
    with backend.device_scope("cpu"):
        img = ops.convert_to_tensor(np.array(img), dtype="float32")
        if len(img.shape) == 2:
            # If the image is grayscale, expand dims to add channel axis.
            # The reason is that `ops.image.resize` expects 3D or 4D tensors.
            img = ops.expand_dims(img, axis=-1)
        if data_format == "channels_first":
            img = ops.transpose(img, (2, 0, 1))
        img = ops.image.resize(
            img,
            size=image_size,
            interpolation=interpolation,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            data_format=data_format,
        )
    if backend.backend() == "tensorflow":
        # NOTE(review): pins a fully static shape on TF tensors —
        # presumably required by downstream `tf.data`-style batching.
        if data_format == "channels_last":
            img.set_shape((image_size[0], image_size[1], num_channels))
        else:
            img.set_shape((num_channels, image_size[0], image_size[1]))
    return img
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/backend_utils.py | keras/src/utils/backend_utils.py | import copy
import importlib
import inspect
import os
import sys
import warnings
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
    """Return whether execution is currently inside a TF graph context."""
    if global_state.get_global_attribute("in_tf_graph_scope", False):
        return True
    if "tensorflow" not in sys.modules:
        # TF was never imported, so we cannot be tracing a TF graph.
        return False
    from keras.src.utils.module_utils import tensorflow as tf

    return not tf.executing_eagerly()
def convert_tf_tensor(outputs, dtype=None):
    """Convert TF tensors to the active backend, except inside TF graphs."""
    if backend_module.backend() == "tensorflow" or in_tf_graph():
        # Already native TF, or we must stay symbolic: pass through as-is.
        return outputs
    return backend_module.convert_to_tensor(outputs, dtype=dtype)
class TFGraphScope:
    """Context manager marking code as running inside a TF graph.

    Sets the `in_tf_graph_scope` global attribute on entry and restores
    the previously observed value on exit, so scopes can be nested.
    """

    def __init__(self):
        # Snapshot the value at construction time so `__exit__` can
        # restore it afterwards.
        self._original_value = global_state.get_global_attribute(
            "in_tf_graph_scope", False
        )

    def __enter__(self):
        global_state.set_global_attribute("in_tf_graph_scope", True)

    def __exit__(self, *args, **kwargs):
        global_state.set_global_attribute(
            "in_tf_graph_scope", self._original_value
        )
def in_grain_data_pipeline():
    """Return whether the current call stack originates inside grain."""
    if "grain" not in sys.modules:
        # Fast path: grain was never imported, so we cannot be inside it.
        return False
    # Walk the raw frame stack (cheaper than `inspect.stack`) looking for
    # frames defined in grain's dataset / data_loader internals.
    grain_markers = (
        os.path.join("grain", "_src", "python", "dataset"),
        os.path.join("grain", "_src", "python", "data_loader"),
    )
    frame = inspect.currentframe()
    while frame is not None:
        filename = frame.f_code.co_filename
        if any(marker in filename for marker in grain_markers):
            return True
        frame = frame.f_back
    return False
class DynamicBackend:
    """A class that can be used to switch from one backend to another.

    Example:

    ```python
    backend = DynamicBackend("tensorflow")
    y = backend.square(tf.constant(...))
    backend.set_backend("jax")
    y = backend.square(jax.numpy.array(...))
    ```

    Args:
        backend: Initial backend to use (string).
    """

    # Backends whose ops are resolved by lazily importing a backend module.
    # (numpy is special-cased in `__getattr__` because importing it would
    # disrupt the namespace of the main backend import.)
    _BACKEND_MODULES = {
        "tensorflow": "keras.src.backend.tensorflow",
        "jax": "keras.src.backend.jax",
        "torch": "keras.src.backend.torch",
        "openvino": "keras.src.backend.openvino",
    }

    def __init__(self, backend=None):
        # Fall back to the globally configured backend when none is given.
        self._backend = backend or backend_module.backend()

    def set_backend(self, backend):
        """Switch to `backend`, validating it first.

        Raises:
            ValueError: If `backend` is not a supported backend name.
        """
        if backend not in ("tensorflow", "jax", "torch", "numpy", "openvino"):
            raise ValueError(
                "Available backends are ('tensorflow', 'jax', 'torch', "
                f"'numpy' and 'openvino'). Received: backend={backend}"
            )
        self._backend = backend

    def reset(self):
        """Reset to the globally configured backend."""
        self._backend = backend_module.backend()

    @property
    def name(self):
        """Name of the currently selected backend."""
        return self._backend

    def __getattr__(self, name):
        module_path = self._BACKEND_MODULES.get(self._backend)
        if module_path is not None:
            module = importlib.import_module(module_path)
            return getattr(module, name)
        if self._backend == "numpy":
            if backend_module.backend() == "numpy":
                return getattr(backend_module, name)
            raise NotImplementedError(
                "Currently, we cannot dynamically import the numpy backend "
                "because it would disrupt the namespace of the import."
            )
        # Bug fix: previously this fell off the end and implicitly returned
        # `None` for any unrecognized backend string, silently yielding
        # `None` attributes. Honor the `__getattr__` contract instead.
        raise AttributeError(
            f"Unknown backend {self._backend!r}: cannot resolve attribute "
            f"{name!r}."
        )
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
>>> import os
>>> os.environ["KERAS_BACKEND"] = "tensorflow"
>>>
>>> import keras
>>> from keras import ops
>>> type(ops.ones(()))
<class 'tensorflow.python.framework.ops.EagerTensor'>
>>>
>>> keras.config.set_backend("jax")
UserWarning: Using `keras.config.set_backend` is dangerous...
>>> del keras, ops
>>>
>>> import keras
>>> from keras import ops
>>> type(ops.ones(()))
<class 'jaxlib.xla_extension.ArrayImpl'>
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()` and re-importing all imported `keras` modules.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
warnings.warn(
"Using `keras.config.set_backend` is dangerous and should be done "
"carefully. Already-instantiated objects will not be converted. Thus, "
"any layers / tensors / etc. already created will no longer be usable "
"without errors. It is strongly recommended not to keep around any "
"Keras-originated objects instances created before calling "
"`set_backend()`. This includes any function or class instance that "
"uses any Keras functionality. All such code needs to be re-executed "
"after calling `set_backend()` and re-importing all imported `keras` "
"modules.",
stacklevel=2,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/torch_utils_test.py | keras/src/utils/torch_utils_test.py | import os
import numpy as np
import pytest
import torch
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.backend.torch.core import get_device
from keras.src.utils.torch_utils import TorchModuleWrapper
class Classifier(models.Model):
    """Test model stacking explicitly wrapped torch blocks + a Keras head.

    Each torch block is a `torch.nn.Sequential` (Linear, optionally
    followed by BatchNorm1d) wrapped in `TorchModuleWrapper` so Keras
    tracks its parameters.
    """

    def __init__(
        self, use_batch_norm=False, num_torch_layers=1, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.use_batch_norm = use_batch_norm
        self.num_torch_layers = num_torch_layers
        # One TorchModuleWrapper per requested torch block.
        self.torch_wrappers = []
        for _ in range(num_torch_layers):
            modules = [torch.nn.Linear(2, 2)]
            if use_batch_norm:
                modules.append(torch.nn.BatchNorm1d(2))
            torch_model = torch.nn.Sequential(*modules)
            self.torch_wrappers.append(TorchModuleWrapper(torch_model))
        self.fc = layers.Dense(1)

    def call(self, x, training=None):
        # Forward through every wrapped torch block, then the Keras head.
        for wrapper in self.torch_wrappers:
            x = wrapper(x, training=training)
        return self.fc(x)

    def get_config(self):
        # Serialize constructor arguments so the model can be rebuilt.
        config = super().get_config()
        config["use_batch_norm"] = self.use_batch_norm
        config["num_torch_layers"] = self.num_torch_layers
        return config
class ClassifierWithNoSpecialCasing(models.Model):
    """Test model assigning raw `torch.nn` modules as plain attributes.

    Exercises the implicit auto-wrapping path: the torch submodules are
    not wrapped in `TorchModuleWrapper` here (the wrapping is asserted in
    `test_module_autowrapping`).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fc1 = torch.nn.Linear(2, 4)
        self.bn1 = torch.nn.BatchNorm1d(4)
        self.fc2 = torch.nn.Linear(4, 4)
        self.fc3 = layers.Dense(2)

    def call(self, x, training=None):
        return self.fc3(self.fc2(self.bn1(self.fc1(x))))
@pytest.mark.skipif(
    backend.backend() != "torch", reason="Requires torch backend"
)
class TorchUtilsTest(testing.TestCase):
    """Tests for `TorchModuleWrapper` and torch-module auto-wrapping."""

    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
    )
    def test_basic_usage(self, use_batch_norm, num_torch_layers):
        # Weight tracking, forward pass and fit() on wrapped torch blocks.
        model = Classifier(use_batch_norm, num_torch_layers)
        self.assertEqual(len(model.layers), 2)
        # Linear - Weights, bias, BN - beta, gamma
        torch_trainable_count = 0
        for i, layer in zip(range(num_torch_layers), model.torch_wrappers):
            layer_trainable_count = 2
            if use_batch_norm:
                layer_trainable_count += 2
            self.assertEqual(
                len(layer.trainable_weights), layer_trainable_count
            )
            torch_trainable_count += layer_trainable_count
        model(np.random.random((3, 2)))
        # NOTE(review): `2 * num_torch_layers` equals wrappers + Dense head
        # only because this test is parameterized with num_torch_layers=1.
        self.assertEqual(len(model.layers), 2 * num_torch_layers)
        self.assertEqual(
            len(model.trainable_weights), torch_trainable_count + 2
        )
        model.compile(optimizer="sgd", loss="mse")
        model.fit(np.random.random((3, 2)), np.random.random((3, 1)))

    @parameterized.named_parameters(
        (
            "explicit_torch_wrapper",
            Classifier,
            {"use_batch_norm": True, "num_torch_layers": 1},
        ),
        ("implicit_torch_wrapper", ClassifierWithNoSpecialCasing, {}),
    )
    def test_training_args(self, cls, kwargs):
        # The `training` flag must gate BatchNorm's running statistics.
        model = cls(**kwargs)
        model(np.random.random((3, 2)), training=False)  # Eager call to build
        ref_weights = model.get_weights()
        ref_running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )

        # Test training=False doesn't affect model weights
        model(np.random.random((3, 2)), training=False)
        weights = model.get_weights()
        for w, ref_w in zip(weights, ref_weights):
            self.assertAllClose(w, ref_w)

        # Test training=None affects BN's stats
        model.set_weights(ref_weights)  # Restore previous weights
        model(np.random.random((3, 2)))
        running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )
        self.assertNotAllClose(running_mean, ref_running_mean)

        # Test training=True affects BN's stats
        model.set_weights(ref_weights)  # Restore previous weights
        model(np.random.random((3, 2)), training=True)
        running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )
        self.assertNotAllClose(running_mean, ref_running_mean)

    def test_module_autowrapping(self):
        # Raw torch modules assigned as attributes must be auto-wrapped in
        # `TorchModuleWrapper`; Keras layers must be left alone.
        model = ClassifierWithNoSpecialCasing()
        self.assertIsInstance(model.fc1, TorchModuleWrapper)
        self.assertIsInstance(model.bn1, TorchModuleWrapper)
        self.assertIsInstance(model.fc2, TorchModuleWrapper)
        self.assertFalse(isinstance(model.fc3, TorchModuleWrapper))
        self.assertEqual(len(model.fc1.trainable_weights), 2)
        self.assertEqual(len(model.bn1.trainable_weights), 2)
        self.assertEqual(len(model.fc2.trainable_weights), 2)
        model(np.random.random((3, 2)))
        self.assertEqual(len(model.layers), 4)
        self.assertEqual(len(model.fc3.trainable_weights), 2)
        self.assertEqual(len(model.trainable_weights), 8)
        model.compile(optimizer="sgd", loss="mse")
        model.fit(np.random.random((3, 2)), np.random.random((3, 2)))

    def test_load_weights_autowrapping(self):
        # Test loading weights
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
        model = ClassifierWithNoSpecialCasing()
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save_weights(temp_filepath)

        new_model = ClassifierWithNoSpecialCasing()
        new_model(np.random.random((3, 2)))
        new_model.compile(optimizer="sgd", loss="mse")
        new_model.load_weights(temp_filepath)
        # Reloaded weights and evaluation must match the original model.
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    def test_serialize_model_autowrapping(self):
        # Test loading saved model
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        model = ClassifierWithNoSpecialCasing()
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save(temp_filepath)

        new_model = saving.load_model(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
        {"use_batch_norm": False, "num_torch_layers": 2},
        {"use_batch_norm": True, "num_torch_layers": 2},
    )
    def test_load_weights(self, use_batch_norm, num_torch_layers):
        # Test loading weights
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
        model = Classifier(use_batch_norm, num_torch_layers)
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save_weights(temp_filepath)

        new_model = Classifier(use_batch_norm, num_torch_layers)
        new_model(np.random.random((3, 2)))
        new_model.compile(optimizer="sgd", loss="mse")
        new_model.load_weights(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
        {"use_batch_norm": False, "num_torch_layers": 2},
        {"use_batch_norm": True, "num_torch_layers": 2},
    )
    def test_serialize_model(self, use_batch_norm, num_torch_layers):
        # Test loading saved model
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        model = Classifier(use_batch_norm, num_torch_layers)
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save(temp_filepath)

        new_model = saving.load_model(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    def test_from_config(self):
        # Round-trip a TorchModuleWrapper through get_config/from_config.
        module = torch.nn.Sequential(torch.nn.Linear(2, 4))
        mw = TorchModuleWrapper(module)
        config = mw.get_config()
        new_mw = TorchModuleWrapper.from_config(config)
        for ref_w, new_w in zip(mw.get_weights(), new_mw.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)

    def test_build_model(self):
        # Wrapped torch modules must work inside a functional model when an
        # explicit `output_shape` is supplied.
        x = keras.Input([4])
        z = TorchModuleWrapper(torch.nn.Linear(4, 8), output_shape=[None, 8])(x)
        y = TorchModuleWrapper(torch.nn.Linear(8, 16), output_shape=[None, 16])(
            z
        )
        model = keras.Model(x, y)
        self.assertEqual(model.predict(np.zeros([5, 4])).shape, (5, 16))
        self.assertEqual(model(np.zeros([5, 4])).shape, (5, 16))

    @parameterized.named_parameters(
        ("safe_mode", True),
        ("unsafe_mode", False),
    )
    def test_save_load(self, safe_mode):
        # Saving a model holding a raw torch module requires unsafe
        # deserialization; safe_mode must refuse to load it.
        @keras.saving.register_keras_serializable()
        class M(keras.Model):
            def __init__(self, module, **kwargs):
                super().__init__(**kwargs)
                self.module = module

            def call(self, x):
                return self.module(x)

            def get_config(self):
                base_config = super().get_config()
                config = {"module": self.module}
                return {**base_config, **config}

            @classmethod
            def from_config(cls, config):
                config["module"] = saving.deserialize_keras_object(
                    config["module"]
                )
                return cls(**config)

        m = M(torch.nn.Conv2d(1, 10, kernel_size=(3, 3)))
        device = get_device()  # Get the current device (e.g., "cuda" or "cpu")
        x = torch.ones(
            (10, 1, 28, 28), device=device
        )  # Place input on the correct device
        ref_output = m(x)
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        m.save(temp_filepath)
        if safe_mode:
            with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
                saving.load_model(temp_filepath, safe_mode=safe_mode)
        else:
            new_model = saving.load_model(temp_filepath, safe_mode=safe_mode)
            self.assertAllClose(new_model(x), ref_output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/argument_validation.py | keras/src/utils/argument_validation.py | def standardize_tuple(value, n, name, allow_zero=False):
"""Transforms non-negative/positive integer/integers into an integer tuple.
Args:
value: int or iterable of ints. The value to validate and convert.
n: int. The size of the tuple to be returned.
name: string. The name of the argument being validated, e.g. "strides"
or "kernel_size". This is only used to format error messages.
allow_zero: bool, defaults to `False`. A `ValueError` will raised
if zero is received and this argument is `False`.
Returns:
A tuple of n integers.
"""
error_msg = (
f"The `{name}` argument must be a tuple of {n} integers. "
f"Received {name}={value}"
)
if isinstance(value, int):
value_tuple = (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError(error_msg)
if len(value_tuple) != n:
raise ValueError(error_msg)
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
error_msg += (
f"including element {single_value} of "
f"type {type(single_value)}"
)
raise ValueError(error_msg)
if allow_zero:
unqualified_values = {v for v in value_tuple if v < 0}
req_msg = ">= 0"
else:
unqualified_values = {v for v in value_tuple if v <= 0}
req_msg = "> 0"
if unqualified_values:
error_msg += (
f", including values {unqualified_values}"
f" that do not satisfy `value {req_msg}`"
)
raise ValueError(error_msg)
return value_tuple
def standardize_padding(value, allow_causal=False):
if isinstance(value, (list, tuple)):
return value
padding = value.lower()
if allow_causal:
allowed_values = {"valid", "same", "causal"}
else:
allowed_values = {"valid", "same"}
if padding not in allowed_values:
raise ValueError(
"The `padding` argument must be a list/tuple or one of "
f"{allowed_values}. "
f"Received: {padding}"
)
return padding
def validate_string_arg(
value,
allowable_strings,
caller_name,
arg_name,
allow_none=False,
allow_callables=False,
):
"""Validates the correctness of a string-based arg."""
if allow_none and value is None:
return
elif allow_callables and callable(value):
return
elif isinstance(value, str) and value in allowable_strings:
return
raise ValueError(
f"Unknown value for `{arg_name}` argument of {caller_name}. "
f"Allowed values are: {allowable_strings}. Received: "
f"{arg_name}={value}"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/tf_utils.py | keras/src/utils/tf_utils.py | from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def is_ragged_tensor(x):
return "ragged_tensor.RaggedTensor" in str(type(x))
def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input and return a sparse tensor."""
result = tf.sparse.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
axis=-1,
binary_output=binary_output,
)
result = tf.cast(result, dtype)
if inputs.shape.rank == 1:
output_shape = (depth,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, depth)
result = tf.SparseTensor(
indices=result.indices, values=result.values, dense_shape=output_shape
)
return result
def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input."""
result = tf.math.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
dtype=dtype,
axis=-1,
binary_output=binary_output,
)
if inputs.shape.rank == 1:
result.set_shape(tf.TensorShape((depth,)))
else:
batch_size = inputs.shape.as_list()[0]
result.set_shape(tf.TensorShape((batch_size, depth)))
return result
def expand_dims(inputs, axis):
"""Expand dims on sparse, ragged, or dense tensors."""
if isinstance(inputs, tf.SparseTensor):
return tf.sparse.expand_dims(inputs, axis)
return tf.expand_dims(inputs, axis)
def tf_encode_categorical_inputs(
inputs,
output_mode,
depth,
dtype="float32",
sparse=False,
count_weights=None,
idf_weights=None,
):
"""Encodes categorical inputs according to output_mode.
Faster method that relies on bincount.
"""
if output_mode == "int":
return tf.identity(tf.cast(inputs, dtype))
original_shape = inputs.shape
# In all cases, we should uprank scalar input to a single sample.
if inputs.shape.rank == 0:
inputs = expand_dims(inputs, -1)
# One hot will uprank only if the final output dimension is not already 1.
if output_mode == "one_hot":
if inputs.shape[-1] != 1:
inputs = expand_dims(inputs, -1)
if inputs.shape.rank > 2:
raise ValueError(
"When output_mode is not `'int'`, maximum supported output rank "
f"is 2. Received output_mode {output_mode} and input shape "
f"{original_shape}, "
f"which would result in output rank {inputs.shape.rank}."
)
binary_output = output_mode in ("multi_hot", "one_hot")
if sparse:
bincounts = sparse_bincount(
inputs, depth, binary_output, dtype, count_weights
)
else:
bincounts = dense_bincount(
inputs, depth, binary_output, dtype, count_weights
)
bincounts = tf.cast(bincounts, dtype)
if output_mode != "tf_idf":
return bincounts
if idf_weights is None:
raise ValueError(
"When output mode is `'tf_idf'`, idf_weights must be provided. "
f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
)
if sparse:
value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
return tf.SparseTensor(
bincounts.indices,
value_weights * bincounts.values,
bincounts.dense_shape,
)
else:
return tf.multiply(bincounts, idf_weights)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/image_utils_test.py | keras/src/utils/image_utils_test.py | import os
import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.utils import img_to_array
from keras.src.utils import load_img
from keras.src.utils import save_img
class SaveImgTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("rgb_explicit_format", (50, 50, 3), "rgb.jpg", "jpg", True),
("rgba_explicit_format", (50, 50, 4), "rgba.jpg", "jpg", True),
("rgb_inferred_format", (50, 50, 3), "rgb_inferred.jpg", None, False),
("rgba_inferred_format", (50, 50, 4), "rgba_inferred.jpg", None, False),
)
def test_save_jpg(self, shape, name, file_format, use_explicit_format):
tmp_dir = self.get_temp_dir()
path = os.path.join(tmp_dir, name)
img = np.random.randint(0, 256, size=shape, dtype=np.uint8)
# Test the actual inferred case - don't pass file_format at all
if use_explicit_format:
save_img(path, img, file_format=file_format)
else:
save_img(path, img) # Let it infer from path
self.assertTrue(os.path.exists(path))
# Verify saved image is correctly converted to RGB if needed
loaded_img = load_img(path)
loaded_array = img_to_array(loaded_img)
self.assertEqual(loaded_array.shape, (50, 50, 3))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/tracking.py | keras/src/utils/tracking.py | from functools import wraps
from keras.src import tree
from keras.src.backend.common.global_state import get_global_attribute
from keras.src.backend.common.global_state import set_global_attribute
from keras.src.utils import python_utils
class DotNotTrackScope:
def __enter__(self):
self.original_value = is_tracking_enabled()
set_global_attribute("tracking_on", False)
def __exit__(self, *args, **kwargs):
set_global_attribute("tracking_on", self.original_value)
def is_tracking_enabled():
return get_global_attribute("tracking_on", True)
def no_automatic_dependency_tracking(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DotNotTrackScope():
return fn(*args, **kwargs)
return wrapper
class Tracker:
"""Attribute tracker, used for e.g. Variable tracking.
Monitors certain attribute types
and put them in appropriate lists in case of a match.
Also passively tracks certain mutable collections
(dict, list) so that items added to them later
still get tracked. This is done by wrapping these
collections into an equivalent, tracking-aware object.
Example:
```python
def __init__(self):
self.tracker = Tracker(
# Format: `name: (test_fn, store)`
{
"variables":
(lambda x: isinstance(x, Variable), self._variables),
"metrics": (lambda x: isinstance(x, Metric), self._metrics),
"layers": (lambda x: isinstance(x, Layer), self._layers),
}
)
def __setattr__(self, name, value):
if hasattr(self, "_tracker"):
value = self._tracker.track(value)
return super().__setattr__(name, value)
```
"""
def __init__(self, config, exclusions=None):
self.config = config
self.stored_ids = {name: set() for name in self.config.keys()}
self.locked = False
self._lock_violation_msg = None
self.exclusions = exclusions or {}
def track(self, attr):
if not is_tracking_enabled():
return attr
for store_name, (is_attr_type, _) in self.config.items():
if is_attr_type(attr):
if store_name in self.exclusions:
for excl in self.exclusions[store_name]:
if self.is_in_store(excl, attr):
return attr
if not self.is_in_store(store_name, attr):
self.add_to_store(store_name, attr)
return attr
if isinstance(attr, tuple) and hasattr(attr, "_fields"):
# Named tuple case.
wrapped_attr = {}
for name, e in attr._asdict().items():
wrapped_attr[name] = self.track(e)
return attr.__class__(**wrapped_attr)
if isinstance(attr, tuple):
wrapped_attr = []
for e in attr:
wrapped_attr.append(self.track(e))
return attr.__class__(wrapped_attr)
elif isinstance(attr, list):
return TrackedList(attr, self)
elif isinstance(attr, dict):
# TODO: OrderedDict?
return TrackedDict(attr, self)
elif isinstance(attr, set):
return TrackedSet(attr, self)
return attr
def untrack(self, value):
for store_name in self.stored_ids.keys():
if id(value) in self.stored_ids[store_name]:
self.stored_ids[store_name].remove(id(value))
python_utils.remove_by_id(self.config[store_name][1], value)
def lock(self, msg=None):
self.locked = True
if msg is not None:
self._lock_violation_msg = msg
def unlock(self):
self.locked = False
def add_to_store(self, store_name, value):
if self.locked:
raise ValueError(self._lock_violation_msg)
self.config[store_name][1].append(value)
self.stored_ids[store_name].add(id(value))
def is_in_store(self, store_name, value):
return id(value) in self.stored_ids[store_name]
def replace_tracked_value(self, store_name, old_value, new_value):
if not self.is_in_store(store_name, old_value):
raise ValueError(f"Unknown value: {old_value}")
store_list = self.config[store_name][1]
index = store_list.index(old_value)
store_list[index] = new_value
self.stored_ids[store_name].remove(id(old_value))
self.stored_ids[store_name].add(id(new_value))
@tree.register_tree_node_class
class TrackedList(list):
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
values = [tracker.track(v) for v in values]
super().__init__(values or [])
def append(self, value):
if self.tracker:
self.tracker.track(value)
super().append(value)
def insert(self, index, value):
if self.tracker:
self.tracker.track(value)
super().insert(index, value)
def extend(self, values):
if self.tracker:
values = [self.tracker.track(v) for v in values]
super().extend(values)
def remove(self, value):
if self.tracker:
self.tracker.untrack(value)
try:
super().remove(value)
except ValueError:
python_utils.remove_by_id(self, value)
def pop(self, index=-1):
if self.tracker:
value = self[index]
self.tracker.untrack(value)
return super().pop(index)
else:
return super().pop(index)
def clear(self):
if self.tracker:
for value in self:
self.tracker.untrack(value)
super().clear()
def __delitem__(self, index):
value = self[index] # Get value before removing
super().__delitem__(index)
if self.tracker:
self.tracker.untrack(value)
def tree_flatten(self):
# For optree / dmtree
return (self, None)
@classmethod
def tree_unflatten(cls, metadata, children):
# For optree / dmtree
return cls(children)
def torchtree_flatten(self):
# For torchtree
# Returns (values, metadata)
return (self, None)
@classmethod
def torchtree_unflatten(cls, children, metadata):
# For torchtree
# Requires (children, metadata)
return cls(children)
def torchtree_flatten_with_keys(self):
# For torchtree
# Returns (children, metadata)
from torch.utils import _pytree as torch_tree
values, context = self.torchtree_flatten()
return [
(torch_tree.SequenceKey(i), v) for i, v in enumerate(values)
], context
@tree.register_tree_node_class
class TrackedDict(dict):
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
values = {k: tracker.track(v) for k, v in values.items()}
super().__init__(values or [])
def __setitem__(self, key, value):
if self.tracker:
self.tracker.track(value)
super().__setitem__(key, value)
def update(self, mapping):
if self.tracker:
mapping = {k: self.tracker.track(v) for k, v in mapping.items()}
super().update(mapping)
def pop(self, key, default=None):
if self.tracker:
value = super().pop(key, default)
if value is not default:
self.tracker.untrack(value)
return value
else:
return super().pop(key, default)
def popitem(self):
key, value = super().popitem()
if self.tracker:
self.tracker.untrack(value)
return key, value
def clear(self):
if self.tracker:
for value in self.values():
self.tracker.untrack(value)
super().clear()
def tree_flatten(self):
# For optree / dmtree
keys = sorted(list(self.keys()))
values = [self[k] for k in keys]
return values, keys, keys
@classmethod
def tree_unflatten(cls, keys, values):
# For optree / dmtree
return cls(zip(keys, values))
def torchtree_flatten(self):
# For torch_tree
# Returns (values, metadata)
keys = sorted(list(self.keys()))
values = [self[k] for k in keys]
return values, keys
@classmethod
def torchtree_unflatten(cls, values, keys):
# For torch_tree
# Requires (children, metadata)
return cls(zip(keys, values))
def torchtree_flatten_with_keys(self):
# For torchtree
# Returns (children, metadata)
from torch.utils import _pytree as torch_tree
values, context = self.torchtree_flatten()
return [
(torch_tree.MappingKey(k), v) for k, v in zip(context, values)
], context
@tree.register_tree_node_class
class TrackedSet(set):
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
values = {tracker.track(v) for v in values}
super().__init__(values or [])
def add(self, value):
if self.tracker:
self.tracker.track(value)
super().add(value)
def update(self, values):
if self.tracker:
values = [self.tracker.track(v) for v in values]
super().update(values)
def remove(self, value):
if self.tracker:
self.tracker.untrack(value)
super().remove(value)
def pop(self):
value = super().pop()
if self.tracker:
self.tracker.untrack(value)
return value
def clear(self):
if self.tracker:
for value in self:
self.tracker.untrack(value)
super().clear()
def tree_flatten(self):
# For optree / dmtree
return (self, None)
@classmethod
def tree_unflatten(cls, metadata, children):
# For optree / dmtree
return cls(children)
def torchtree_flatten(self):
# For torchtree
# Returns (values, metadata)
return (self, None)
@classmethod
def torchtree_unflatten(cls, children, metadata):
# For torchtree
# Requires (values, metadata)
return cls(children)
def torchtree_flatten_with_keys(self):
# For torchtree
# Returns (children, metadata)
from torch.utils import _pytree as torch_tree
values, context = self.torchtree_flatten()
return [
(torch_tree.SequenceKey(i), v) for i, v in enumerate(values)
], context
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/sequence_utils_test.py | keras/src/utils/sequence_utils_test.py | from keras.src import testing
from keras.src.utils import sequence_utils
class PadSequencesTest(testing.TestCase):
def test_pad_sequences(self):
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_float(self):
a = [[1.2], [1.2, 2.3], [1.2, 2.3, 3.4]]
# test padding
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="pre", dtype="float32"
)
self.assertAllClose(b, [[0, 0, 1.2], [0, 1.2, 2.3], [1.2, 2.3, 3.4]])
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="post", dtype="float32"
)
self.assertAllClose(b, [[1.2, 0, 0], [1.2, 2.3, 0], [1.2, 2.3, 3.4]])
# test truncating
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="pre", dtype="float32"
)
self.assertAllClose(b, [[0, 1.2], [1.2, 2.3], [2.3, 3.4]])
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", dtype="float32"
)
self.assertAllClose(b, [[0, 1.2], [1.2, 2.3], [1.2, 2.3]])
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1, dtype="float32")
self.assertAllClose(b, [[1, 1, 1.2], [1, 1.2, 2.3], [1.2, 2.3, 3.4]])
def test_pad_sequences_str(self):
a = [["1"], ["1", "2"], ["1", "2", "3"]]
# test padding
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="pre", value="pad", dtype=object
)
self.assertAllEqual(
b, [["pad", "pad", "1"], ["pad", "1", "2"], ["1", "2", "3"]]
)
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="post", value="pad", dtype="<U3"
)
self.assertAllEqual(
b, [["1", "pad", "pad"], ["1", "2", "pad"], ["1", "2", "3"]]
)
# test truncating
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="pre", value="pad", dtype=object
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["2", "3"]])
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad", dtype="<U3"
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["1", "2"]])
with self.assertRaisesRegex(
ValueError, "`dtype` int32 is not compatible with "
):
sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad"
)
def test_pad_sequences_vector(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
# test padding
b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(
b,
[
[[0, 0], [0, 0], [1, 1]],
[[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(
b,
[
[[1, 1], [0, 0], [0, 0]],
[[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]],
],
)
# test truncating
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]
)
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]
)
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(
b,
[
[[1, 1], [1, 1], [1, 1]],
[[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/dataset_utils_test.py | keras/src/utils/dataset_utils_test.py | import collections
import itertools
import numpy as np
import torch
from absl.testing import parameterized
from torch.utils.data import Dataset as TorchDataset
from keras.src import backend
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.module_utils import tensorflow as tf
class MyTorchDataset(TorchDataset):
def __init__(self, x, y=None):
# Convert NumPy → Torch tensors if needed
def to_tensor(v):
if isinstance(v, torch.Tensor):
return v
if hasattr(v, "shape"):
return torch.as_tensor(v, dtype=torch.float32)
return v
# Convert structured input recursively
def map_structure(obj):
if isinstance(obj, (dict, collections.OrderedDict)):
return {k: map_structure(v) for k, v in obj.items()}
if isinstance(obj, (tuple, list)):
typ = type(obj)
return typ(map_structure(v) for v in obj)
return to_tensor(obj)
self.x = map_structure(x)
self.y = None if y is None else map_structure(y)
# Infer dataset length from the first tensor in x
def first_tensor(obj):
if isinstance(obj, (dict, collections.OrderedDict)):
return first_tensor(next(iter(obj.values())))
if isinstance(obj, (tuple, list)):
return first_tensor(obj[0])
return obj
self.length = len(first_tensor(self.x))
def __len__(self):
return self.length
def __getitem__(self, idx):
def index_structure(obj):
if isinstance(obj, (dict, collections.OrderedDict)):
return obj.__class__(
(k, index_structure(v)) for k, v in obj.items()
)
if isinstance(obj, (tuple, list)):
typ = type(obj)
return typ(index_structure(v) for v in obj)
return obj[idx]
if self.y is None:
return index_structure(self.x)
return index_structure(self.x), index_structure(self.y)
class DatasetUtilsTest(test_case.TestCase):
@parameterized.named_parameters(
named_product(
dataset_type=["list", "tuple", "tensorflow", "torch"],
features_shape=[(2,), (100, 2), (10, 10, 2)],
preferred_backend=[None, "tensorflow", "torch"],
)
)
def test_split_dataset(
self, dataset_type, features_shape, preferred_backend
):
n_sample, left_size, right_size = 100, 0.2, 0.8
features = np.random.sample((n_sample,) + features_shape)
labels = np.random.sample((n_sample, 1))
cardinality_function = (
tf.data.Dataset.cardinality
if (backend.backend() != "torch" and preferred_backend != "torch")
else len
)
if dataset_type == "list":
dataset = [features, labels]
elif dataset_type == "tuple":
dataset = (features, labels)
elif dataset_type == "tensorflow":
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
elif dataset_type == "torch":
dataset = MyTorchDataset(features, labels)
cardinality_function = len
else:
raise ValueError(f"Unknown dataset_type: {dataset_type}")
dataset_left, dataset_right = split_dataset(
dataset,
left_size=left_size,
right_size=right_size,
preferred_backend=preferred_backend,
)
self.assertEqual(
int(cardinality_function(dataset_left)), int(n_sample * left_size)
)
self.assertEqual(
int(cardinality_function(dataset_right)), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
self.assertEqual(sample[0].shape, features_shape)
self.assertEqual(sample[1].shape, (1,))
@parameterized.named_parameters(
named_product(structure_type=["tuple", "dict", "OrderedDict"])
)
def test_split_dataset_nested_structures(self, structure_type):
n_sample, left_size, right_size = 100, 0.2, 0.8
features1 = np.random.sample((n_sample, 2))
features2 = np.random.sample((n_sample, 10, 2))
labels = np.random.sample((n_sample, 1))
if backend.backend() != "torch":
create_dataset_function = tf.data.Dataset.from_tensor_slices
cardinality_function = tf.data.Dataset.cardinality
else:
create_dataset_function = MyTorchDataset
cardinality_function = len
if structure_type == "tuple":
dataset = create_dataset_function(((features1, features2), labels))
if structure_type == "dict":
dataset = create_dataset_function(
{"y": features2, "x": features1, "labels": labels}
)
if structure_type == "OrderedDict":
dataset = create_dataset_function(
collections.OrderedDict(
[("y", features2), ("x", features1), ("labels", labels)]
)
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(cardinality_function(dataset_left)), int(n_sample * left_size)
)
self.assertEqual(
int(cardinality_function(dataset_right)), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
if structure_type in ("dict", "OrderedDict"):
x, y, labels = sample["x"], sample["y"], sample["labels"]
elif structure_type == "tuple":
(x, y), labels = sample
self.assertEqual(x.shape, (2,))
self.assertEqual(y.shape, (10, 2))
self.assertEqual(labels.shape, (1,))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/jax_layer.py | keras/src/utils/jax_layer.py | import functools
import inspect
import itertools
import string
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
from keras.src.utils import jax_utils
from keras.src.utils import tracking
from keras.src.utils.module_utils import jax
from keras.src.utils.module_utils import tensorflow as tf
if backend.backend() == "tensorflow":
tf_no_automatic_dependency_tracking = (
tf.__internal__.tracking.no_automatic_dependency_tracking
)
else:
def tf_no_automatic_dependency_tracking(fn):
return fn
def _convert_to_jax_key(tensor):
if backend.backend() == "tensorflow":
return tf.bitcast(tensor, tf.uint32)[0]
return tensor
@keras_export("keras.layers.JaxLayer")
class JaxLayer(Layer):
"""Keras Layer that wraps a JAX model.
This layer enables the use of JAX components within Keras when using JAX as
the backend for Keras.
## Model function
This layer accepts JAX models in the form of a function, `call_fn`, which
must take the following arguments with these exact names:
- `params`: trainable parameters of the model.
- `state` (*optional*): non-trainable state of the model. Can be omitted if
the model has no non-trainable state.
- `rng` (*optional*): a `jax.random.PRNGKey` instance. Can be omitted if the
model does not need RNGs, neither during training nor during inference.
- `inputs`: inputs to the model, a JAX array or a `PyTree` of arrays.
- `training` (*optional*): an argument specifying if we're in training mode
or inference mode, `True` is passed in training mode. Can be omitted if
the model behaves the same in training mode and inference mode.
The `inputs` argument is mandatory. Inputs to the model must be provided via
a single argument. If the JAX model takes multiple inputs as separate
arguments, they must be combined into a single structure, for instance in a
`tuple` or a `dict`.
## Model weights initialization
The initialization of the `params` and `state` of the model can be handled
by this layer, in which case the `init_fn` argument must be provided. This
allows the model to be initialized dynamically with the right shape.
Alternatively, and if the shape is known, the `params` argument and
optionally the `state` argument can be used to create an already initialized
model.
The `init_fn` function, if provided, must take the following arguments with
these exact names:
- `rng`: a `jax.random.PRNGKey` instance.
- `inputs`: a JAX array or a `PyTree` of arrays with placeholder values to
provide the shape of the inputs.
- `training` (*optional*): an argument specifying if we're in training mode
or inference mode. `True` is always passed to `init_fn`. Can be omitted
regardless of whether `call_fn` has a `training` argument.
## Models with non-trainable state
For JAX models that have non-trainable state:
- `call_fn` must have a `state` argument
- `call_fn` must return a `tuple` containing the outputs of the model and
the new non-trainable state of the model
- `init_fn` must return a `tuple` containing the initial trainable params of
the model and the initial non-trainable state of the model.
This code shows a possible combination of `call_fn` and `init_fn` signatures
for a model with non-trainable state. In this example, the model has a
`training` argument and an `rng` argument in `call_fn`.
```python
def stateful_call(params, state, rng, inputs, training):
outputs = ...
new_state = ...
return outputs, new_state
def stateful_init(rng, inputs):
initial_params = ...
initial_state = ...
return initial_params, initial_state
```
## Models without non-trainable state
For JAX models with no non-trainable state:
- `call_fn` must not have a `state` argument
- `call_fn` must return only the outputs of the model
- `init_fn` must return only the initial trainable params of the model.
This code shows a possible combination of `call_fn` and `init_fn` signatures
for a model without non-trainable state. In this example, the model does not
have a `training` argument and does not have an `rng` argument in `call_fn`.
```python
def stateless_call(params, inputs):
outputs = ...
return outputs
def stateless_init(rng, inputs):
initial_params = ...
return initial_params
```
## Conforming to the required signature
If a model has a different signature than the one required by `JaxLayer`,
one can easily write a wrapper method to adapt the arguments. This example
shows a model that has multiple inputs as separate arguments, expects
multiple RNGs in a `dict`, and has a `deterministic` argument with the
opposite meaning of `training`. To conform, the inputs are combined in a
single structure using a `tuple`, the RNG is split and used to populate the
expected `dict`, and the Boolean flag is negated:
```python
def my_model_fn(params, rngs, input1, input2, deterministic):
...
if not deterministic:
dropout_rng = rngs["dropout"]
keep = jax.random.bernoulli(dropout_rng, dropout_rate, x.shape)
x = jax.numpy.where(keep, x / dropout_rate, 0)
...
...
return outputs
def my_model_wrapper_fn(params, rng, inputs, training):
input1, input2 = inputs
rng1, rng2 = jax.random.split(rng)
rngs = {"dropout": rng1, "preprocessing": rng2}
deterministic = not training
return my_model_fn(params, rngs, input1, input2, deterministic)
keras_layer = JaxLayer(my_model_wrapper_fn, params=initial_params)
```
## Usage with Haiku modules
`JaxLayer` enables the use of [Haiku](https://dm-haiku.readthedocs.io)
components in the form of
[`haiku.Module`](https://dm-haiku.readthedocs.io/en/latest/api.html#module).
This is achieved by transforming the module per the Haiku pattern and then
passing `module.apply` in the `call_fn` parameter and `module.init` in the
`init_fn` parameter if needed.
If the model has non-trainable state, it should be transformed with
[`haiku.transform_with_state`](
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform_with_state).
If the model has no non-trainable state, it should be transformed with
[`haiku.transform`](
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform).
Additionally, and optionally, if the module does not use RNGs in "apply", it
can be transformed with
[`haiku.without_apply_rng`](
https://dm-haiku.readthedocs.io/en/latest/api.html#without-apply-rng).
The following example shows how to create a `JaxLayer` from a Haiku module
that uses random number generators via `hk.next_rng_key()` and takes a
training positional argument:
```python
class MyHaikuModule(hk.Module):
def __call__(self, x, training):
x = hk.Conv2D(32, (3, 3))(x)
x = jax.nn.relu(x)
x = hk.AvgPool((1, 2, 2, 1), (1, 2, 2, 1), "VALID")(x)
x = hk.Flatten()(x)
x = hk.Linear(200)(x)
if training:
x = hk.dropout(rng=hk.next_rng_key(), rate=0.3, x=x)
x = jax.nn.relu(x)
x = hk.Linear(10)(x)
x = jax.nn.softmax(x)
return x
def my_haiku_module_fn(inputs, training):
module = MyHaikuModule()
return module(inputs, training)
transformed_module = hk.transform(my_haiku_module_fn)
keras_layer = JaxLayer(
call_fn=transformed_module.apply,
init_fn=transformed_module.init,
)
```
Args:
call_fn: The function to call the model. See description above for the
list of arguments it takes and the outputs it returns.
init_fn: the function to call to initialize the model. See description
above for the list of arguments it takes and the outputs it returns.
If `None`, then `params` and/or `state` must be provided.
params: A `PyTree` containing all the model trainable parameters. This
allows passing trained parameters or controlling the initialization.
If both `params` and `state` are `None`, `init_fn` is called at
build time to initialize the trainable parameters of the model.
state: A `PyTree` containing all the model non-trainable state. This
allows passing learned state or controlling the initialization. If
both `params` and `state` are `None`, and `call_fn` takes a `state`
argument, then `init_fn` is called at build time to initialize the
non-trainable state of the model.
seed: Seed for random number generator. Optional.
dtype: The dtype of the layer's computations and weights. Can also be a
`keras.DTypePolicy`. Optional. Defaults to the default policy.
"""
def __init__(
    self,
    call_fn,
    init_fn=None,
    params=None,
    state=None,
    seed=None,
    **kwargs,
):
    # Only the JAX backend (native) and the TensorFlow backend (via
    # jax2tf, see `build`) are supported.
    if backend.backend() not in ["jax", "tensorflow"]:
        raise ValueError(
            f"{self.__class__.__name__} is only supported with the JAX or"
            f" Tensorflow backend. Current backend: {backend.backend()}"
        )
    # Without `init_fn` there is no way to create weights, so `params`
    # and/or `state` must be provided up front.
    if init_fn is None and params is None and state is None:
        raise ValueError(
            "`init_fn`, `params` and `state` cannot all be `None`."
        )
    super().__init__(**kwargs)
    self.call_fn = call_fn
    self.init_fn = init_fn
    self.seed_generator = backend.random.SeedGenerator(seed)
    # `_create_variables` also assigns `self.params` / `self.state` as a
    # side effect; the returned flat lists are what gets tracked.
    self.tracked_params = self._create_variables(params, trainable=True)
    self.tracked_state = self._create_variables(state, trainable=False)
    # If weights were provided up front, the layer is already built.
    if self.params is not None or self.state is not None:
        self._build_at_init()

    # Record which of the supported argument names `call_fn` actually
    # declares; only `inputs` is mandatory.
    self.call_fn_arguments = self._validate_signature(
        call_fn,
        "call_fn",
        {"params", "state", "rng", "inputs", "training"},
        {"inputs"},
    )
    self.has_state = "state" in self.call_fn_arguments

    if init_fn:
        self.init_fn_arguments = self._validate_signature(
            init_fn, "init_fn", {"rng", "inputs", "training"}, {"inputs"}
        )

    # Attributes for jax2tf functions, populated in `build` when the
    # backend is TensorFlow.
    self.jax2tf_training_false_fn = None
    self.jax2tf_training_true_fn = None
def _validate_signature(self, fn, fn_name, allowed, required):
fn_parameters = inspect.signature(fn).parameters
for parameter_name in required:
if parameter_name not in fn_parameters:
raise ValueError(
f"Missing required argument in `{fn_name}`: "
f"`{parameter_name}`"
)
parameter_names = []
for parameter in fn_parameters.values():
if parameter.name not in allowed:
raise ValueError(
f"Unsupported argument in `{fn_name}`: `{parameter.name}`, "
f"supported arguments are `{'`, `'.join(allowed)}`"
)
parameter_names.append(parameter.name)
return parameter_names
def _get_jax2tf_input_shape(self, input_shape):
    """Convert input shape in a format suitable for `jax2tf`.

    `jax2tf` expects a letter for each unknown dimension, which allows
    correlated dimensions. Since correlated dimensions are not supported
    by Keras, we simply use 'a', 'b', 'c'..., for each unknown
    dimension. We however use 'batch' for dimension 0 if not defined to
    correlate the batch size across inputs.

    Example (spaces added for readability):
    ```
    input_shape: (None , 4 , None, None, 5 )
    result:      "(batch, 4 , a , b , 5 )"
    ```

    Args:
        input_shape: a single shape or a structure of shapes for the
            inputs.

    Returns:
        the shape or shapes structure in the `jax2tf` format as strings.
    """
    # Infinite stream of fresh dimension names shared across all shapes
    # in the structure: 'a'..'z', then 'aa'..'zz'.
    dim_names = itertools.chain(
        string.ascii_lowercase,  # a, b, ... z
        itertools.starmap(  # aa, ab, ... az, ba, bb, ... zz
            lambda a, b: a + b,
            itertools.product(string.ascii_lowercase, repeat=2),
        ),
    )

    def get_single_jax2tf_shape(shape):
        # Render one shape as a string, e.g. "(batch, 4, a)". Consumes
        # names from `dim_names` for unknown non-batch dimensions.
        jax2tf_shape = []

        for index, dim in enumerate(shape):
            if dim is not None:
                jax2tf_shape.append(str(dim))
            elif index == 0:
                # Dimension 0 is always named "batch" so that it is
                # correlated across all inputs.
                jax2tf_shape.append("batch")
            else:
                jax2tf_shape.append(next(dim_names))

        return "(" + ", ".join(jax2tf_shape) + ")"

    res = tree.map_shape_structure(get_single_jax2tf_shape, input_shape)
    return res
def _jax2tf_convert(self, fn, polymorphic_shapes):
    """Convert `fn` to a TensorFlow function via `jax2tf`.

    Args:
        fn: the JAX function to convert.
        polymorphic_shapes: the `jax2tf` polymorphic shape specs, one
            entry per positional argument of `fn`.

    Returns:
        The converted function, with autograph disabled.
    """
    from jax.experimental import jax2tf

    tf_fn = jax2tf.convert(fn, polymorphic_shapes=polymorphic_shapes)
    # Autograph won't work with the output of jax2tf.
    return tf.autograph.experimental.do_not_convert(tf_fn)
def _partial_with_positional(self, fn, index, value):
"""Return a new partial with one positional argument set to a value.
This is needed because `jax2tf` only supports positional arguments and
`functools.partial` only supports setting positional arguments starting
from the left. Our use case is the `training` argument which is
typically the righmost argument.
Args:
fn: the function to wrap.
index: the index of the positional argument to set to `value`.
value: the value for the positional argument at `index`.
"""
@functools.wraps(fn)
def wrapper(*args):
args = args[0:index] + (value,) + args[index:]
return fn(*args)
return wrapper
@tracking.no_automatic_dependency_tracking
@tf_no_automatic_dependency_tracking
def _create_variables(self, values, trainable):
    """Create a structure of variables from a structure of JAX arrays.

    `values` is traversed via JAX's `tree_map`. When a leaf is a JAX
    array or a tensor-like object, a corresponding variable is created
    with it as the initial value. The resulting structure of variables
    is assigned to `self.params` or `self.state` depending on
    `trainable`. Then, a flattened version of the variables is returned
    for tracking. `self.params` or `self.state` are intentionally not
    tracked because structures like `TrackedList` interfere with
    `jax.tree_utils`.

    Note that leaf objects that are not JAX arrays and not tensor-like
    are left intact as they are assumed to be configuration used by the
    model.

    Args:
        values: the structure of values to traverse.
        trainable: whether to create trainable variables.

    Returns:
        flat list of variables initialized with `values` for tracking.
    """

    def create_variable(value):
        if backend.is_tensor(value) or isinstance(
            value, (np.ndarray, np.generic, jax.Array)
        ):
            dtype = value.dtype
            if is_float_dtype(dtype):
                dtype = None  # Use the layer dtype policy
            return self.add_weight(
                value.shape,
                initializer=backend.convert_to_tensor(value),
                dtype=dtype,
                trainable=trainable,
            )
        elif isinstance(value, (bool, int, float)):
            # Python scalars become rank-0 weights with a dtype derived
            # from the Python type.
            dtype = standardize_dtype(type(value))
            if is_float_dtype(dtype):
                dtype = None  # Use the layer dtype policy
            return self.add_weight(
                (),
                initializer=backend.convert_to_tensor(value),
                dtype=dtype,
                trainable=trainable,
            )
        else:
            # Non-tensor leaves are assumed to be configuration and are
            # kept as-is.
            return value

    # Use JAX's tree_map as it understands registered classes.
    variables = jax.tree_util.tree_map(create_variable, values)
    if trainable:
        self.params = variables
    else:
        self.state = variables
    flat_variables, _ = jax.tree_util.tree_flatten(variables)
    return flat_variables
def _get_init_rng(self):
"""
Returns a key in form of the backend array of size 2 dtype uint32
to pass to `init_fn`.
By default, this returns a Jax or TF array of size 2 by calling
`self.seed_generator.next()`. Override this to return a different
structure.
Returns:
a key as an Jax or TF array of size 2 dtype uint32 will be passed
as the `rng` argument of `init_fn`.
"""
return self.seed_generator.next()
def _get_call_rng(self, training):
"""
Returns a key in form of the backend array of size 2 dtype uint32
to pass to `call_fn`.
By default, this returns a Jax or TF array of size 2 by calling
`self.seed_generator.next()` when `training` is `True`, and `None` when
`training` is `False`. Override this to return a different structure or
to pass RNGs in inference mode too.
Returns:
a key as an Jax or TF array of size 2 dtype uint32 will be passed
as the `rng` argument of `call_fn`.
"""
if training:
return self.seed_generator.next()
else:
return None
def _initialize_weights(self, input_shape):
    """Create `params` and `state` by calling `init_fn` on dummy inputs.

    Builds placeholder inputs of ones matching `input_shape` (unknown
    dimensions are materialized as 1), assembles the arguments that
    `init_fn` declares, calls it, and turns the result into tracked
    variables via `_create_variables`.

    Args:
        input_shape: a single shape or a structure of shapes for the
            inputs.

    Raises:
        ValueError: when called inside a JAX tracing scope or a
            `tf.function`, where initialization cannot run eagerly.
    """
    if jax_utils.is_in_jax_tracing_scope() or tf.inside_function():
        # This exception is not actually shown, it is caught and a detailed
        # warning about calling 'build' is printed.
        # Bug fix: the two literals previously concatenated without a
        # separating space ("...tracing scopeor inside...").
        raise ValueError(
            "'JaxLayer' cannot be built in tracing scope "
            "or inside tf function"
        )

    # Initialize `params` and `state` if needed by calling `init_fn`.

    def create_input(shape):
        # Unknown dimensions are materialized as 1 for initialization.
        shape = [d if d is not None else 1 for d in shape]
        return jax.numpy.ones(shape)

    init_inputs = tree.map_shape_structure(create_input, input_shape)
    init_args = []
    for argument_name in self.init_fn_arguments:
        if argument_name == "rng":
            init_args.append(
                jax.tree_util.tree_map(
                    lambda x: jax.numpy.array(_convert_to_jax_key(x)),
                    self._get_init_rng(),
                )
            )
        elif argument_name == "inputs":
            init_args.append(init_inputs)
        elif argument_name == "training":
            # `True` is always passed to `init_fn`.
            init_args.append(True)

    init_result = self.init_fn(*init_args)
    # With state, `init_fn` returns a (params, state) tuple; without,
    # it returns only params.
    if self.has_state:
        init_params, init_state = init_result
    else:
        init_params, init_state = init_result, None

    self.tracked_params = self._create_variables(
        init_params, trainable=True
    )
    self.tracked_state = self._create_variables(init_state, trainable=False)
def build(self, input_shape):
    """Initialize weights if needed and, on TF, pre-convert `call_fn`.

    On the TensorFlow backend, `call_fn` is converted via `jax2tf` here.
    Since `jax2tf` only supports positional arguments, the `training`
    argument (when `call_fn` declares one) is pre-bound, producing one
    converted function for `training=False` and one for `training=True`.
    """
    if self.params is None and self.state is None:
        self._initialize_weights(input_shape)

    if backend.backend() == "tensorflow":
        # One polymorphic shape spec per positional argument of
        # `call_fn` (except `training`, which is pre-bound below).
        polymorphic_shapes = []
        for argument in self.call_fn_arguments:
            if argument == "inputs":
                polymorphic_shapes.append(
                    self._get_jax2tf_input_shape(input_shape)
                )
            elif argument != "training":
                # params, state, rng
                polymorphic_shapes.append("...")

        if "training" in self.call_fn_arguments:
            training_argument_index = self.call_fn_arguments.index(
                "training"
            )
            self.jax2tf_training_false_fn = self._jax2tf_convert(
                self._partial_with_positional(
                    self.call_fn, training_argument_index, False
                ),
                polymorphic_shapes,
            )
            self.jax2tf_training_true_fn = self._jax2tf_convert(
                self._partial_with_positional(
                    self.call_fn, training_argument_index, True
                ),
                polymorphic_shapes,
            )
        else:
            # No `training` argument: a single converted function is
            # used for both modes.
            self.jax2tf_training_false_fn = self._jax2tf_convert(
                self.call_fn,
                polymorphic_shapes,
            )
            self.jax2tf_training_true_fn = None
    super().build(input_shape)
def call(self, inputs, training=False):
    """Run `call_fn`, assembling its arguments from the layer's state.

    Args:
        inputs: the inputs, forwarded to `call_fn` as-is.
        training: whether to run in training mode; selects the RNG via
            `_get_call_rng` and, on TensorFlow, which pre-converted
            function to invoke.

    Returns:
        The outputs of `call_fn`. If `call_fn` also returns new
        non-trainable state, it is assigned back into `self.state`.
    """

    def unwrap_variable(variable):
        return None if variable is None else variable.value

    # Build the positional argument list in the order declared by
    # `call_fn` (recorded in `__init__`).
    call_args = []
    for argument_name in self.call_fn_arguments:
        if argument_name == "params":
            call_args.append(
                jax.tree_util.tree_map(unwrap_variable, self.params)
            )
        elif argument_name == "state":
            call_args.append(
                jax.tree_util.tree_map(unwrap_variable, self.state)
            )
        elif argument_name == "rng":
            call_args.append(
                jax.tree_util.tree_map(
                    _convert_to_jax_key, self._get_call_rng(training)
                )
            )
        elif argument_name == "inputs":
            call_args.append(inputs)
        elif argument_name == "training":
            # On TF, `training` was pre-bound at conversion time in
            # `build`, so it is only passed on the JAX backend.
            if backend.backend() == "jax":
                call_args.append(training)

    def assign_state_to_variable(value, variable):
        # This exists only to make debugging this error case easier.
        if not hasattr(variable, "assign"):
            raise ValueError(
                "Structure mismatch: the structure of the state returned "
                "by `call` does not match the structure of the state at "
                "initialization time."
            )
        variable.assign(value)

    def call_with_fn(fn):
        # When `call_fn` has state, it returns (outputs, new_state);
        # write the new state back into the layer's variables.
        if self.has_state:
            predictions, new_state = fn(*call_args)
            jax.tree_util.tree_map(
                assign_state_to_variable, new_state, self.state
            )
            return predictions
        else:
            return fn(*call_args)

    if backend.backend() == "jax":
        return call_with_fn(self.call_fn)
    elif backend.backend() == "tensorflow":
        if training and self.jax2tf_training_true_fn is not None:
            return call_with_fn(self.jax2tf_training_true_fn)
        else:
            return call_with_fn(self.jax2tf_training_false_fn)
def get_config(self):
    """Return the serializable config, including `call_fn`/`init_fn`."""
    base_config = super().get_config()
    base_config["call_fn"] = serialization_lib.serialize_keras_object(
        self.call_fn
    )
    base_config["init_fn"] = serialization_lib.serialize_keras_object(
        self.init_fn
    )
    return base_config
@classmethod
def from_config(cls, config):
    """Recreate the layer, deserializing `call_fn` and `init_fn`."""
    for key in ("call_fn", "init_fn"):
        config[key] = serialization_lib.deserialize_keras_object(
            config[key]
        )
    return super().from_config(config)
@keras_export("keras.layers.FlaxLayer")
class FlaxLayer(JaxLayer):
"""Keras Layer that wraps a [Flax](https://flax.readthedocs.io) module.
This layer enables the use of Flax components in the form of
[`flax.linen.Module`](
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)
instances within Keras when using JAX as the backend for Keras.
The module method to use for the forward pass can be specified via the
`method` argument and is `__call__` by default. This method must take the
following arguments with these exact names:
- `self` if the method is bound to the module, which is the case for the
default of `__call__`, and `module` otherwise to pass the module.
- `inputs`: the inputs to the model, a JAX array or a `PyTree` of arrays.
- `training` *(optional)*: an argument specifying if we're in training mode
or inference mode, `True` is passed in training mode.
`FlaxLayer` handles the non-trainable state of your model and required RNGs
automatically. Note that the `mutable` parameter of
[`flax.linen.Module.apply()`](
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply)
is set to `DenyList(["params"])`, therefore making the assumption that all
the variables outside of the "params" collection are non-trainable weights.
This example shows how to create a `FlaxLayer` from a Flax `Module` with
the default `__call__` method and no training argument:
```python
class MyFlaxModule(flax.linen.Module):
@flax.linen.compact
def __call__(self, inputs):
x = inputs
x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x)
x = flax.linen.relu(x)
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = flax.linen.Dense(features=200)(x)
x = flax.linen.relu(x)
x = flax.linen.Dense(features=10)(x)
x = flax.linen.softmax(x)
return x
flax_module = MyFlaxModule()
keras_layer = FlaxLayer(flax_module)
```
This example shows how to wrap the module method to conform to the required
signature. This allows having multiple input arguments and a training
argument that has a different name and values. This additionally shows how
to use a function that is not bound to the module.
```python
class MyFlaxModule(flax.linen.Module):
@flax.linen.compact
def forward(self, input1, input2, deterministic):
...
return outputs
def my_flax_module_wrapper(module, inputs, training):
input1, input2 = inputs
return module.forward(input1, input2, not training)
flax_module = MyFlaxModule()
keras_layer = FlaxLayer(
module=flax_module,
method=my_flax_module_wrapper,
)
```
Args:
module: An instance of `flax.linen.Module` or subclass.
method: The method to call the model. This is generally a method in the
`Module`. If not provided, the `__call__` method is used. `method`
can also be a function not defined in the `Module`, in which case it
must take the `Module` as the first argument. It is used for both
`Module.init` and `Module.apply`. Details are documented in the
`method` argument of [`flax.linen.Module.apply()`](
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply).
variables: A `dict` containing all the variables of the module in the
same format as what is returned by [`flax.linen.Module.init()`](
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.init).
It should contain a "params" key and, if applicable, other keys for
collections of variables for non-trainable state. This allows
passing trained parameters and learned non-trainable state or
controlling the initialization. If `None` is passed, the module's
`init` function is called at build time to initialize the variables
of the model.
"""
def __init__(
    self,
    module,
    method=None,
    variables=None,
    **kwargs,
):
    # Late import to only require Flax when this is used.
    from flax.core import scope as flax_scope

    self.module = module
    self.method = method
    # Everything outside the "params" collection is considered mutable,
    # non-trainable state for `Module.apply`.
    apply_mutable = flax_scope.DenyList(["params"])

    def apply_with_training(params, state, rng, inputs, training):
        # `call_fn` variant used when the module method declares a
        # `training` argument.
        return self.module.apply(
            self._params_and_state_to_variables(params, state),
            inputs,
            rngs=rng,
            method=self.method,
            mutable=apply_mutable,
            training=training,
        )

    def apply_without_training(params, state, rng, inputs):
        # `call_fn` variant used when the module method has no
        # `training` argument.
        return self.module.apply(
            self._params_and_state_to_variables(params, state),
            inputs,
            rngs=rng,
            method=self.method,
            mutable=apply_mutable,
        )

    def init_with_training(rng, inputs, training):
        # `init_fn` variant used when the module method declares a
        # `training` argument.
        return self._variables_to_params_and_state(
            self.module.init(
                rng,
                inputs,
                method=self.method,
                training=training,
            )
        )

    def init_without_training(rng, inputs):
        # `init_fn` variant used when the module method has no
        # `training` argument.
        return self._variables_to_params_and_state(
            self.module.init(
                rng,
                inputs,
                method=self.method,
            )
        )

    # Pick the wrapper pair based on whether the selected method
    # (defaulting to `__call__`) declares a `training` parameter.
    if (
        "training"
        in inspect.signature(method or module.__call__).parameters
    ):
        call_fn, init_fn = apply_with_training, init_with_training
    else:
        call_fn, init_fn = apply_without_training, init_without_training

    # Split the provided Flax variables dict (if any) into trainable
    # params and non-trainable state for the base class.
    params, state = self._variables_to_params_and_state(variables)

    super().__init__(
        call_fn=call_fn,
        init_fn=init_fn,
        params=params,
        state=state,
        **kwargs,
    )
def _params_and_state_to_variables(self, params, state):
if params:
if state:
return {**params, **state}
else:
return params
elif state:
return state
return {}
def _variables_to_params_and_state(self, variables):
# neither params nor state
if variables is None:
return None, None
# state only
if "params" not in variables:
return {}, variables
# params only
if len(variables) == 1:
return variables, {}
# both, we need to split
params = {"params": variables["params"]}
state = {k: v for k, v in variables.items() if k != "params"}
return params, state
def _get_init_rng(self):
return {
"params": self.seed_generator.next(),
"dropout": self.seed_generator.next(),
}
def _get_call_rng(self, training):
if training:
return {"dropout": self.seed_generator.next()}
else:
return {}
def get_config(self):
    """Return the serializable config of the layer.

    A `method` bound to `self.module` is stored by name only, since the
    module itself is serialized separately. `call_fn` and `init_fn` are
    derived from the module at construction time and are not saved.
    """
    serialized_method = self.method
    is_bound_to_module = (
        hasattr(self.method, "__self__")
        and self.method.__self__ == self.module
    )
    if is_bound_to_module:
        # A method bound to the module is serialized by name.
        serialized_method = self.method.__name__
    base_config = super().get_config()
    # call_fn and init_fn come from module, do not save them.
    base_config.pop("call_fn")
    base_config.pop("init_fn")
    base_config["module"] = serialization_lib.serialize_keras_object(
        self.module
    )
    base_config["method"] = serialization_lib.serialize_keras_object(
        serialized_method
    )
    return base_config
@classmethod
def from_config(cls, config):
module = serialization_lib.deserialize_keras_object(config["module"])
method = serialization_lib.deserialize_keras_object(config["method"])
if isinstance(config["method"], str):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/tracking_test.py | keras/src/utils/tracking_test.py | import collections
from keras.src import backend
from keras.src import testing
from keras.src.utils import tracking
class TrackingTest(testing.TestCase):
    """Tests for `tracking.Tracker` with lists and tuples."""

    def test_untracking_in_tracked_list(self):
        """Removing items from a `TrackedList` also untracks them."""
        tracked_variables = []
        tracker = tracking.Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    tracked_variables,
                ),
            }
        )
        v1 = backend.Variable(1.0)
        v2 = backend.Variable(2.0)
        lst = tracking.TrackedList([], tracker)
        # Interleave variables with values the tracker should ignore.
        lst.append(v1)
        lst.append(float("nan"))
        lst.append(v2)
        lst.append(0)
        self.assertLen(tracked_variables, 2)
        self.assertEqual(tracked_variables[0], v1)
        self.assertEqual(tracked_variables[1], v2)

        # `remove` untracks the removed variable.
        lst.remove(v1)
        self.assertLen(lst, 3)
        self.assertLen(tracked_variables, 1)
        lst.remove(v2)
        self.assertLen(lst, 2)
        self.assertLen(tracked_variables, 0)

        lst2 = tracking.TrackedList([], tracker)
        lst2.append(v1)
        lst2.append(float("nan"))
        lst2.append(v2)
        lst2.append(0)

        # Popping a non-tracked value leaves tracked variables intact.
        popped_value = lst2.pop()
        self.assertEqual(popped_value, 0)
        self.assertLen(lst2, 3)
        self.assertLen(tracked_variables, 2)

        # `clear` untracks everything.
        lst2.clear()
        self.assertLen(lst2, 0)
        self.assertLen(tracked_variables, 0)

        # `del` untracks the deleted item.
        lst2.append(v1)
        lst2.append(v2)
        del lst2[0]
        self.assertLen(lst2, 1)
        self.assertLen(tracked_variables, 1)

    def test_tuple_tracking(self):
        """Variables inside a plain tuple are discovered by `track`."""
        tracked_variables = []
        tracker = tracking.Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    tracked_variables,
                ),
            }
        )
        v1 = backend.Variable(1.0)
        v2 = backend.Variable(2.0)
        tup = (v1, v2)
        tup = tracker.track(tup)
        # The result is still a tuple and both variables are tracked.
        self.assertIsInstance(tup, tuple)
        self.assertLen(tracked_variables, 2)
        self.assertEqual(tracked_variables[0], v1)
        self.assertEqual(tracked_variables[1], v2)

    def test_namedtuple_tracking(self):
        """A tracked namedtuple still exposes its named fields."""
        tracked_variables = []
        tracker = tracking.Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    tracked_variables,
                ),
            }
        )
        v1 = backend.Variable(1.0)
        v2 = backend.Variable(2.0)
        nt = collections.namedtuple("NT", ["x", "y"])
        tup = nt(x=v1, y=v2)
        tup = tracker.track(tup)
        # Field access still works after tracking.
        self.assertIsInstance(tup, tuple)
        self.assertEqual(tup.x, v1)
        self.assertEqual(tup.y, v2)
        self.assertLen(tracked_variables, 2)
        self.assertEqual(tracked_variables[0], v1)
        self.assertEqual(tracked_variables[1], v2)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.