repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/file_utils_test.py | keras/src/utils/file_utils_test.py | import hashlib
import os
import shutil
import tarfile
import tempfile
import urllib
import urllib.parse
import urllib.request
import zipfile
from unittest.mock import patch
from keras.src.testing import test_case
from keras.src.utils import file_utils
class PathToStringTest(test_case.TestCase):
    """Tests for `file_utils.path_to_string`."""

    def test_path_to_string_with_string_path(self):
        # A plain string path should be returned unchanged.
        original = os.path.join(os.path.sep, "path", "to", "file.txt")
        self.assertEqual(file_utils.path_to_string(original), original)

    def test_path_to_string_with_PathLike_object(self):
        # The string form of a path-like value is returned.
        original = os.path.join(os.path.sep, "path", "to", "file.txt")
        self.assertEqual(file_utils.path_to_string(original), str(original))

    def test_path_to_string_with_non_string_typed_path_object(self):
        # Objects implementing __fspath__ are resolved through the
        # os.PathLike protocol.
        class NonStringTypedPathObject:
            def __fspath__(self):
                return os.path.join(os.path.sep, "path", "to", "file.txt")

        result = file_utils.path_to_string(NonStringTypedPathObject())
        self.assertEqual(
            result, os.path.join(os.path.sep, "path", "to", "file.txt")
        )

    def test_path_to_string_with_none_path(self):
        # None passes through untouched.
        self.assertIsNone(file_utils.path_to_string(None))
class ResolvePathTest(test_case.TestCase):
    """Tests for `file_utils.resolve_path`."""

    def _expected(self, path):
        # resolve_path should agree with realpath(abspath(path)).
        return os.path.realpath(os.path.abspath(path))

    def test_resolve_path_with_absolute_path(self):
        path = os.path.join(os.path.sep, "path", "to", "file.txt")
        self.assertEqual(file_utils.resolve_path(path), self._expected(path))

    def test_resolve_path_with_relative_path(self):
        path = os.path.join(".", "file.txt")
        self.assertEqual(file_utils.resolve_path(path), self._expected(path))
class IsPathInDirTest(test_case.TestCase):
    """Tests for `file_utils.is_path_in_dir`."""

    def test_is_path_in_dir_with_absolute_paths(self):
        # A file directly inside base_dir is reported as contained.
        base_dir = os.path.join(os.path.sep, "path", "to", "base_dir")
        candidate = os.path.join(base_dir, "file.txt")
        self.assertTrue(file_utils.is_path_in_dir(candidate, base_dir))
class IsLinkInDirTest(test_case.TestCase):
    """Tests for `file_utils.is_link_in_dir`.

    Each test creates a real symlink on disk and wraps its metadata in a
    minimal TarInfo-like object exposing `name` and `linkname`.
    """

    def setUp(self):
        self._cleanup(os.path.join("test_path", "to", "base_dir"))
        self._cleanup(os.path.join(".", "base_dir"))

    def tearDown(self):
        self._cleanup(os.path.join("test_path", "to", "base_dir"))
        self._cleanup(os.path.join(".", "base_dir"))

    def _cleanup(self, base_dir):
        # Remove a directory tree left over from a previous run, if any.
        if os.path.exists(base_dir):
            shutil.rmtree(base_dir)

    def _check_link_in_dir(self, base_dir):
        """Create a symlink inside `base_dir` and assert it is detected.

        Shared body for the absolute- and relative-path tests; the original
        code duplicated this block and also contained a dead
        `os.lstat(link_path)` assignment that was immediately overwritten.
        """
        link_path = os.path.join(base_dir, "symlink")
        target_path = os.path.join(base_dir, "file.txt")
        # Create the base_dir directory if it does not exist.
        os.makedirs(base_dir, exist_ok=True)
        # Create the file.txt file the link will point at.
        with open(target_path, "w") as f:
            f.write("Hello, world!")
        os.symlink(target_path, link_path)
        # Only `name` and `linkname` are inspected by is_link_in_dir, so a
        # bare class carrying those attributes stands in for a tar member.
        info = type(
            "stat_with_name",
            (object,),
            {
                "name": os.path.basename(link_path),
                "linkname": os.readlink(link_path),
            },
        )
        self.assertTrue(file_utils.is_link_in_dir(info, base_dir))

    def test_is_link_in_dir_with_absolute_paths(self):
        self._check_link_in_dir(os.path.join("test_path", "to", "base_dir"))

    def test_is_link_in_dir_with_relative_paths(self):
        self._check_link_in_dir(os.path.join(".", "base_dir"))
class FilterSafePathsTest(test_case.TestCase):
    """Tests for `file_utils.filter_safe_tarinfos`.

    Each test writes a small tar archive to disk and checks which members
    survive the extraction-safety filter.
    """

    def setUp(self):
        # Each test gets a fresh tar file under a working directory in CWD.
        self.base_dir = os.path.join(os.getcwd(), "temp_dir")
        os.makedirs(self.base_dir, exist_ok=True)
        self.tar_path = os.path.join(self.base_dir, "test.tar")

    def tearDown(self):
        os.remove(self.tar_path)
        shutil.rmtree(self.base_dir)

    def test_member_within_base_dir(self):
        """Test a member within the base directory."""
        with tarfile.open(self.tar_path, "w") as tar:
            # Use this very test file as an arbitrary safe payload.
            tar.add(__file__, arcname="safe_path.txt")
        with tarfile.open(self.tar_path, "r") as tar:
            members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
            self.assertEqual(len(members), 1)
            self.assertEqual(members[0].name, "safe_path.txt")

    def test_symlink_within_base_dir(self):
        """Test a symlink pointing within the base directory."""
        symlink_path = os.path.join(self.base_dir, "symlink.txt")
        target_path = os.path.join(self.base_dir, "target.txt")
        with open(target_path, "w") as f:
            f.write("target")
        os.symlink(target_path, symlink_path)
        with tarfile.open(self.tar_path, "w") as tar:
            tar.add(symlink_path, arcname="symlink.txt")
        with tarfile.open(self.tar_path, "r") as tar:
            # A symlink that stays inside the base dir must not be filtered.
            members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
            self.assertEqual(len(members), 1)
            self.assertEqual(members[0].name, "symlink.txt")
        os.remove(symlink_path)
        os.remove(target_path)

    def test_invalid_path_warning(self):
        """Test warning for an invalid path during archive extraction."""
        invalid_path = os.path.join(os.getcwd(), "invalid.txt")
        with open(invalid_path, "w") as f:
            f.write("invalid")
        with tarfile.open(self.tar_path, "w") as tar:
            tar.add(
                invalid_path, arcname="../../invalid.txt"
            )  # Path intended to be outside of base dir
        with tarfile.open(self.tar_path, "r") as tar:
            with patch("warnings.warn") as mock_warn:
                # The unsafe member must be skipped with a warning rather
                # than returned for extraction.
                _ = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
                warning_msg = (
                    "Skipping invalid path during archive extraction: "
                    "'../../invalid.txt'."
                )
                mock_warn.assert_called_with(warning_msg, stacklevel=2)
        os.remove(invalid_path)

    def test_symbolic_link_in_base_dir(self):
        """symbolic link within the base directory is correctly processed."""
        symlink_path = os.path.join(self.base_dir, "symlink.txt")
        target_path = os.path.join(self.base_dir, "target.txt")
        # Create a target file and then a symbolic link pointing to it.
        with open(target_path, "w") as f:
            f.write("target")
        os.symlink(target_path, symlink_path)
        # Add the symbolic link to the tar archive.
        with tarfile.open(self.tar_path, "w") as tar:
            tar.add(symlink_path, arcname="symlink.txt")
        with tarfile.open(self.tar_path, "r") as tar:
            members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
            self.assertEqual(len(members), 1)
            self.assertEqual(members[0].name, "symlink.txt")
            self.assertTrue(
                members[0].issym()
            )  # Explicitly assert it's a symbolic link.
        os.remove(symlink_path)
        os.remove(target_path)
class ExtractArchiveTest(test_case.TestCase):
    """Tests for `file_utils.extract_archive` over tar and zip archives."""

    def setUp(self):
        """Create temporary directories and files for testing."""
        self.temp_dir = tempfile.mkdtemp()
        self.file_content = "Hello, world!"
        # Create sample files to be archived
        with open(os.path.join(self.temp_dir, "sample.txt"), "w") as f:
            f.write(self.file_content)

    def tearDown(self):
        """Clean up temporary directories."""
        shutil.rmtree(self.temp_dir)

    def create_tar(self):
        # Build a .tar containing sample.txt and return the archive path.
        archive_path = os.path.join(self.temp_dir, "sample.tar")
        with tarfile.open(archive_path, "w") as archive:
            archive.add(
                os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt"
            )
        return archive_path

    def create_zip(self):
        # Build a .zip containing sample.txt and return the archive path.
        archive_path = os.path.join(self.temp_dir, "sample.zip")
        with zipfile.ZipFile(archive_path, "w") as archive:
            archive.write(
                os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt"
            )
        return archive_path

    def test_extract_tar(self):
        archive_path = self.create_tar()
        extract_path = os.path.join(self.temp_dir, "extract_tar")
        result = file_utils.extract_archive(archive_path, extract_path, "tar")
        self.assertTrue(result)
        # Verify the extracted file round-trips its content.
        with open(os.path.join(extract_path, "sample.txt"), "r") as f:
            self.assertEqual(f.read(), self.file_content)

    def test_extract_zip(self):
        archive_path = self.create_zip()
        extract_path = os.path.join(self.temp_dir, "extract_zip")
        result = file_utils.extract_archive(archive_path, extract_path, "zip")
        self.assertTrue(result)
        with open(os.path.join(extract_path, "sample.txt"), "r") as f:
            self.assertEqual(f.read(), self.file_content)

    def test_extract_auto(self):
        # This will test the 'auto' functionality
        tar_archive_path = self.create_tar()
        zip_archive_path = self.create_zip()
        extract_tar_path = os.path.join(self.temp_dir, "extract_auto_tar")
        extract_zip_path = os.path.join(self.temp_dir, "extract_auto_zip")
        self.assertTrue(
            file_utils.extract_archive(tar_archive_path, extract_tar_path)
        )
        self.assertTrue(
            file_utils.extract_archive(zip_archive_path, extract_zip_path)
        )
        with open(os.path.join(extract_tar_path, "sample.txt"), "r") as f:
            self.assertEqual(f.read(), self.file_content)
        with open(os.path.join(extract_zip_path, "sample.txt"), "r") as f:
            self.assertEqual(f.read(), self.file_content)

    def test_non_existent_file(self):
        # A missing archive should surface as FileNotFoundError.
        extract_path = os.path.join(self.temp_dir, "non_existent")
        with self.assertRaises(FileNotFoundError):
            file_utils.extract_archive("non_existent.tar", extract_path)

    def test_archive_format_none(self):
        # archive_format=None disables extraction; the call returns False.
        archive_path = self.create_tar()
        extract_path = os.path.join(self.temp_dir, "none_format")
        result = file_utils.extract_archive(archive_path, extract_path, None)
        self.assertFalse(result)

    def test_runtime_error_during_extraction(self):
        # A failure mid-extraction must propagate and leave no partial
        # output directory behind.
        tar_path = self.create_tar()
        extract_path = os.path.join(self.temp_dir, "runtime_error_extraction")
        with patch.object(
            tarfile.TarFile, "extractall", side_effect=RuntimeError
        ):
            with self.assertRaises(RuntimeError):
                file_utils.extract_archive(tar_path, extract_path, "tar")
        self.assertFalse(os.path.exists(extract_path))

    def test_keyboard_interrupt_during_extraction(self):
        # Ctrl-C during extraction must also clean up the partial output.
        tar_path = self.create_tar()
        extract_path = os.path.join(
            self.temp_dir, "keyboard_interrupt_extraction"
        )
        with patch.object(
            tarfile.TarFile, "extractall", side_effect=KeyboardInterrupt
        ):
            with self.assertRaises(KeyboardInterrupt):
                file_utils.extract_archive(tar_path, extract_path, "tar")
        self.assertFalse(os.path.exists(extract_path))
class GetFileTest(test_case.TestCase):
    """Tests for `file_utils.get_file` download, caching and extraction,
    plus the thin filesystem wrappers (exists/File/isdir/join/...).

    Remote origins are simulated with `file://` URLs pointing at local
    fixture files, so no network access is needed.
    """

    def setUp(self):
        """Set up temporary directories and sample files."""
        self.temp_dir = self.get_temp_dir()
        self.file_path = os.path.join(self.temp_dir, "sample_file.txt")
        with open(self.file_path, "w") as f:
            f.write("Sample content")

    def test_valid_tar_extraction(self):
        """Test valid tar.gz extraction and hash validation."""
        dest_dir = self.get_temp_dir()
        orig_dir = self.get_temp_dir()
        _, tar_file_path = self._create_tar_file(orig_dir)
        self._test_file_extraction_and_validation(
            dest_dir, tar_file_path, "tar.gz"
        )

    def test_valid_zip_extraction(self):
        """Test valid zip extraction and hash validation."""
        dest_dir = self.get_temp_dir()
        orig_dir = self.get_temp_dir()
        _, zip_file_path = self._create_zip_file(orig_dir)
        self._test_file_extraction_and_validation(
            dest_dir, zip_file_path, "zip"
        )

    def test_valid_text_file_download(self):
        """Test valid text file download and hash validation."""
        dest_dir = self.get_temp_dir()
        orig_dir = self.get_temp_dir()
        text_file_path = os.path.join(orig_dir, "test.txt")
        with open(text_file_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        # archive_type=None exercises the plain (no-extraction) path.
        self._test_file_extraction_and_validation(
            dest_dir, text_file_path, None
        )

    def test_get_file_with_tgz_extension(self):
        """Test extraction of file with .tar.gz extension."""
        dest_dir = self.get_temp_dir()
        orig_dir = dest_dir
        _, tar_file_path = self._create_tar_file(orig_dir)
        # Serve the local archive through a file:// origin URL.
        origin = urllib.parse.urljoin(
            "file://",
            urllib.request.pathname2url(os.path.abspath(tar_file_path)),
        )
        path = file_utils.get_file(
            "test.txt.tar.gz", origin, untar=True, cache_subdir=dest_dir
        )
        self.assertTrue(os.path.exists(path))
        self.assertTrue(os.path.exists(os.path.join(path, "test.txt")))

    def test_get_file_with_integrity_check(self):
        """Test file download with integrity check."""
        orig_dir = self.get_temp_dir()
        file_path = os.path.join(orig_dir, "test.txt")
        with open(file_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        hashval = file_utils.hash_file(file_path)
        origin = urllib.parse.urljoin(
            "file://", urllib.request.pathname2url(os.path.abspath(file_path))
        )
        path = file_utils.get_file("test.txt", origin, file_hash=hashval)
        self.assertTrue(os.path.exists(path))

    def test_cache_invalidation(self):
        """Test using a hash to force cache invalidation."""
        cache_dir = self.get_temp_dir()
        src_path = os.path.join(self.get_temp_dir(), "test.txt")
        with open(src_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        orig_hash = file_utils.hash_file(src_path)
        origin = urllib.parse.urljoin(
            "file://", urllib.request.pathname2url(os.path.abspath(src_path))
        )
        # Download into the cache.
        dest_path = file_utils.get_file(
            "test.txt", origin, file_hash=orig_hash, cache_dir=cache_dir
        )
        self.assertEqual(orig_hash, file_utils.hash_file(dest_path))
        # Change the source contents behind the cache's back.
        with open(src_path, "w") as text_file:
            text_file.write("Float like a zeppelin, sting like a jellyfish.")
        new_hash = file_utils.hash_file(src_path)
        # Without a hash, we should get the cached version.
        dest_path = file_utils.get_file("test.txt", origin, cache_dir=cache_dir)
        self.assertEqual(orig_hash, file_utils.hash_file(dest_path))
        # Without the new hash, we should re-download.
        dest_path = file_utils.get_file(
            "test.txt", origin, file_hash=new_hash, cache_dir=cache_dir
        )
        self.assertEqual(new_hash, file_utils.hash_file(dest_path))

    def test_force_download(self):
        """Test using a hash to force cache invalidation."""
        cache_dir = self.get_temp_dir()
        src_path = os.path.join(self.get_temp_dir(), "test.txt")
        with open(src_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        orig_hash = file_utils.hash_file(src_path)
        origin = urllib.parse.urljoin(
            "file://", urllib.request.pathname2url(os.path.abspath(src_path))
        )
        # Download into the cache.
        dest_path = file_utils.get_file("test.txt", origin, cache_dir=cache_dir)
        self.assertEqual(orig_hash, file_utils.hash_file(dest_path))
        # Change the source contents behind the cache's back.
        with open(src_path, "w") as text_file:
            text_file.write("Float like a zeppelin, sting like a jellyfish.")
        new_hash = file_utils.hash_file(src_path)
        # Get cached version.
        dest_path = file_utils.get_file("test.txt", origin, cache_dir=cache_dir)
        self.assertEqual(orig_hash, file_utils.hash_file(dest_path))
        # Force download.
        dest_path = file_utils.get_file(
            "test.txt", origin, force_download=True, cache_dir=cache_dir
        )
        self.assertEqual(new_hash, file_utils.hash_file(dest_path))

    def test_get_file_with_failed_integrity_check(self):
        """Test file download with failed integrity check."""
        orig_dir = self.get_temp_dir()
        file_path = os.path.join(orig_dir, "test.txt")
        with open(file_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        # A deliberately wrong (all-zero) sha256 digest.
        hashval = "0" * 64
        origin = urllib.parse.urljoin(
            "file://", urllib.request.pathname2url(os.path.abspath(file_path))
        )
        with self.assertRaisesRegex(
            ValueError, "Incomplete or corrupted file.*"
        ):
            _ = file_utils.get_file("test.txt", origin, file_hash=hashval)

    def _create_tar_file(self, directory):
        """Helper function to create a tar file."""
        text_file_path = os.path.join(directory, "test.txt")
        tar_file_path = os.path.join(directory, "test.tar.gz")
        with open(text_file_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        with tarfile.open(tar_file_path, "w:gz") as tar_file:
            tar_file.add(text_file_path, arcname="test.txt")
        return text_file_path, tar_file_path

    def _create_zip_file(self, directory):
        """Helper function to create a zip file."""
        text_file_path = os.path.join(directory, "test.txt")
        zip_file_path = os.path.join(directory, "test.zip")
        with open(text_file_path, "w") as text_file:
            text_file.write("Float like a butterfly, sting like a bee.")
        with zipfile.ZipFile(zip_file_path, "w") as zip_file:
            zip_file.write(text_file_path, arcname="test.txt")
        return text_file_path, zip_file_path

    def _test_file_extraction_and_validation(
        self, dest_dir, file_path, archive_type
    ):
        """Helper function for file extraction and validation."""
        origin = urllib.parse.urljoin(
            "file://",
            urllib.request.pathname2url(os.path.abspath(file_path)),
        )
        hashval_md5 = file_utils.hash_file(file_path, algorithm="md5")
        extract = bool(archive_type)
        path = file_utils.get_file(
            "test",
            origin,
            md5_hash=hashval_md5,
            extract=extract,
            cache_subdir=dest_dir,
        )
        if extract:
            # When extracting, the downloaded archive itself is kept next to
            # the extraction directory with an "_archive" suffix.
            fpath = f"{path}_archive"
        else:
            fpath = path
        self.assertTrue(os.path.exists(path))
        self.assertTrue(file_utils.validate_file(fpath, hashval_md5))
        if extract:
            self.assertTrue(os.path.exists(os.path.join(path, "test.txt")))

    def test_exists(self):
        temp_dir = self.get_temp_dir()
        file_path = os.path.join(temp_dir, "test_exists.txt")
        with open(file_path, "w") as f:
            f.write("test")
        self.assertTrue(file_utils.exists(file_path))
        self.assertFalse(
            file_utils.exists(os.path.join(temp_dir, "non_existent.txt"))
        )

    def test_file_open_read(self):
        temp_dir = self.get_temp_dir()
        file_path = os.path.join(temp_dir, "test_file.txt")
        content = "test content"
        with open(file_path, "w") as f:
            f.write(content)
        with file_utils.File(file_path, "r") as f:
            self.assertEqual(f.read(), content)

    def test_file_open_write(self):
        temp_dir = self.get_temp_dir()
        file_path = os.path.join(temp_dir, "test_file_write.txt")
        content = "test write content"
        with file_utils.File(file_path, "w") as f:
            f.write(content)
        with open(file_path, "r") as f:
            self.assertEqual(f.read(), content)

    def test_isdir(self):
        temp_dir = self.get_temp_dir()
        self.assertTrue(file_utils.isdir(temp_dir))
        file_path = os.path.join(temp_dir, "test_isdir.txt")
        with open(file_path, "w") as f:
            f.write("test")
        self.assertFalse(file_utils.isdir(file_path))

    def test_join_simple(self):
        self.assertEqual(file_utils.join("/path", "to", "dir"), "/path/to/dir")

    def test_join_single_directory(self):
        self.assertEqual(file_utils.join("/path"), "/path")

    def test_listdir(self):
        content = file_utils.listdir(self.temp_dir)
        self.assertIn("sample_file.txt", content)

    def test_makedirs_and_rmtree(self):
        new_dir = os.path.join(self.temp_dir, "new_directory")
        file_utils.makedirs(new_dir)
        self.assertTrue(os.path.isdir(new_dir))
        file_utils.rmtree(new_dir)
        self.assertFalse(os.path.exists(new_dir))

    def test_copy(self):
        dest_path = os.path.join(self.temp_dir, "copy_sample_file.txt")
        file_utils.copy(self.file_path, dest_path)
        self.assertTrue(os.path.exists(dest_path))
        with open(dest_path, "r") as f:
            content = f.read()
            self.assertEqual(content, "Sample content")

    def test_remove_sub_directory(self):
        parent_dir = os.path.join(self.get_temp_dir(), "parent_directory")
        child_dir = os.path.join(parent_dir, "child_directory")
        file_utils.makedirs(child_dir)
        file_utils.rmtree(parent_dir)
        self.assertFalse(os.path.exists(parent_dir))
        self.assertFalse(os.path.exists(child_dir))

    def test_remove_files_inside_directory(self):
        dir_path = os.path.join(self.get_temp_dir(), "test_directory")
        file_path = os.path.join(dir_path, "test.txt")
        file_utils.makedirs(dir_path)
        with open(file_path, "w") as f:
            f.write("Test content")
        file_utils.rmtree(dir_path)
        self.assertFalse(os.path.exists(dir_path))
        self.assertFalse(os.path.exists(file_path))

    def test_handle_complex_paths(self):
        # Directory names with spaces and punctuation must still be removable.
        complex_dir = os.path.join(self.get_temp_dir(), "complex dir@#%&!")
        file_utils.makedirs(complex_dir)
        file_utils.rmtree(complex_dir)
        self.assertFalse(os.path.exists(complex_dir))
class HashFileTest(test_case.TestCase):
    """Tests for `file_utils.hash_file` against known digests."""

    def setUp(self):
        # Write a fixed payload to a temp file so digests are deterministic.
        self.test_content = b"Hello, World!"
        self.temp_file = tempfile.NamedTemporaryFile(delete=False)
        self.temp_file.write(self.test_content)
        self.temp_file.close()

    def tearDown(self):
        os.remove(self.temp_file.name)

    def test_hash_file_sha256(self):
        """Test SHA256 hashing of a file."""
        expected = (
            "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
        )
        self.assertEqual(
            expected,
            file_utils.hash_file(self.temp_file.name, algorithm="sha256"),
        )

    def test_hash_file_md5(self):
        """Test MD5 hashing of a file."""
        expected = "65a8e27d8879283831b664bd8b7f0ad4"
        self.assertEqual(
            expected,
            file_utils.hash_file(self.temp_file.name, algorithm="md5"),
        )
class TestValidateFile(test_case.TestCase):
    """Tests for `file_utils.validate_file` hash validation."""

    def setUp(self):
        self.tmp_file = tempfile.NamedTemporaryFile(delete=False)
        self.tmp_file.write(b"Hello, World!")
        self.tmp_file.close()
        # Known digests of b"Hello, World!".
        self.sha256_hash = (
            "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
        )
        self.md5_hash = "65a8e27d8879283831b664bd8b7f0ad4"

    def tearDown(self):
        os.remove(self.tmp_file.name)

    def _validate(self, file_hash, algorithm):
        # Validate the fixture file against the given digest/algorithm.
        return file_utils.validate_file(
            self.tmp_file.name, file_hash, algorithm
        )

    def test_validate_file_sha256(self):
        """Validate SHA256 hash of a file."""
        self.assertTrue(self._validate(self.sha256_hash, "sha256"))

    def test_validate_file_md5(self):
        """Validate MD5 hash of a file."""
        self.assertTrue(self._validate(self.md5_hash, "md5"))

    def test_validate_file_auto_sha256(self):
        """Auto-detect and validate SHA256 hash."""
        self.assertTrue(self._validate(self.sha256_hash, "auto"))

    def test_validate_file_auto_md5(self):
        """Auto-detect and validate MD5 hash."""
        self.assertTrue(self._validate(self.md5_hash, "auto"))

    def test_validate_file_wrong_hash(self):
        """Test validation with incorrect hash."""
        self.assertFalse(self._validate("deadbeef" * 8, "sha256"))
class ResolveHasherTest(test_case.TestCase):
    """Tests for `file_utils.resolve_hasher` algorithm selection."""

    def test_resolve_hasher_sha256(self):
        """Test resolving hasher for sha256 algorithm."""
        self.assertIsInstance(
            file_utils.resolve_hasher("sha256"), type(hashlib.sha256())
        )

    def test_resolve_hasher_auto_sha256(self):
        """Auto-detect and resolve hasher for sha256."""
        # A 64-character hex digest selects sha256.
        self.assertIsInstance(
            file_utils.resolve_hasher("auto", file_hash="a" * 64),
            type(hashlib.sha256()),
        )

    def test_resolve_hasher_auto_md5(self):
        """Auto-detect and resolve hasher for md5."""
        # A 32-character hex digest selects md5.
        self.assertIsInstance(
            file_utils.resolve_hasher("auto", file_hash="a" * 32),
            type(hashlib.md5()),
        )

    def test_resolve_hasher_default(self):
        """Resolve hasher with a random algorithm value."""
        # Unrecognized algorithm names fall back to md5.
        self.assertIsInstance(
            file_utils.resolve_hasher("random_value"), type(hashlib.md5())
        )
class IsRemotePathTest(test_case.TestCase):
    """Tests for `file_utils.is_remote_path` prefix detection."""

    def _assert_all_remote(self, paths):
        # Every path in `paths` must be classified as remote.
        for path in paths:
            self.assertTrue(file_utils.is_remote_path(path))

    def test_gcs_remote_path(self):
        self._assert_all_remote(
            [
                "/gcs/some/path/to/file.txt",
                "/gcs/another/directory/",
                "gcs://bucket/some/file.txt",
            ]
        )

    def test_hdfs_remote_path(self):
        self._assert_all_remote(
            ["hdfs://some/path/on/hdfs", "/hdfs/some/local/path"]
        )

    def test_cns_remote_path(self):
        self._assert_all_remote(["/cns/some/path"])

    def test_placer_remote_path(self):
        self._assert_all_remote(
            [
                "/placer/prod/home/some/path",
                "/placer/test/home/some/path",
                "/placer/prod/scratch/home/some/path",
            ]
        )

    def test_tfhub_remote_path(self):
        self._assert_all_remote(["/tfhub/some/path"])

    def test_cfs_remote_path(self):
        self._assert_all_remote(["/cfs/some/path"])

    def test_readahead_remote_path(self):
        self._assert_all_remote(["/readahead/some/path"])

    def test_non_remote_paths(self):
        # Ordinary local, Windows, and relative paths are not remote.
        for path in (
            "/local/path/to/file.txt",
            "C:\\local\\path\\on\\windows\\file.txt",
            "~/relative/path/",
            "./another/relative/path",
            "/local/path",
            "./relative/path",
            "~/relative/path",
        ):
            self.assertFalse(file_utils.is_remote_path(path))
class TestRaiseIfNoGFile(test_case.TestCase):
    """Tests for the `_raise_if_no_gfile` error path."""

    def test_raise_if_no_gfile_raises_correct_message(self):
        remote_path = "gs://bucket/some/file.txt"
        # The error must mention the TensorFlow requirement and echo the
        # offending path.
        pattern = (
            "Handling remote paths requires installing TensorFlow "
            f".*Received path: {remote_path}"
        )
        with self.assertRaisesRegex(ValueError, pattern):
            file_utils._raise_if_no_gfile(remote_path)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/code_stats_test.py | keras/src/utils/code_stats_test.py | import os
import sys
from io import StringIO
from keras.src.testing import test_case
from keras.src.utils.code_stats import count_loc
class TestCountLoc(test_case.TestCase):
    """Tests for `count_loc` lines-of-code counting."""

    def setUp(self):
        # Fixture files are created under this throwaway directory.
        self.test_dir = "test_directory"
        os.makedirs(self.test_dir, exist_ok=True)

    def tearDown(self):
        # Remove all files and subdirectories bottom-up, then the fixture
        # root itself (previously the empty root directory was leaked).
        for root, dirs, files in os.walk(self.test_dir, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        if os.path.isdir(self.test_dir):
            os.rmdir(self.test_dir)

    def create_file(self, filename, content):
        """Write `content` to `filename` under the fixture directory."""
        with open(
            os.path.join(self.test_dir, filename), "w", encoding="utf-8"
        ) as f:
            f.write(content)

    def test_count_loc_valid_python(self):
        self.create_file(
            "sample.py", "# This is a test file\n\nprint('Hello')\n"
        )
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)

    def test_exclude_test_files(self):
        self.create_file("sample_test.py", "print('Hello')\n")
        loc = count_loc(self.test_dir, exclude=("_test",))
        self.assertEqual(loc, 0)

    def test_other_extensions(self):
        self.create_file("sample.txt", "Hello\n")
        loc = count_loc(self.test_dir, extensions=(".py",))
        self.assertEqual(loc, 0)

    def test_comment_lines(self):
        self.create_file(
            "sample.py", "# Comment\nprint('Hello')\n# Another comment\n"
        )
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)

    def test_empty_file(self):
        self.create_file("empty.py", "")
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 0)

    def test_whitespace_only(self):
        self.create_file("whitespace.py", " \n\t\n")
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 0)

    def test_inline_comments_after_code(self):
        content = 'print("Hello") # This is an inline comment'
        self.create_file("inline_comment_sample.py", content)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)  # The comment shouldn't affect the count

    def test_directory_structure(self):
        content1 = 'print("Hello from file1")'
        content2 = 'print("Hello from file2")'
        os.mkdir(os.path.join(self.test_dir, "subdir"))
        self.create_file("sample1.py", content1)
        self.create_file(os.path.join("subdir", "sample2.py"), content2)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 2)  # Both files should be counted

    def test_normal_directory_name(self):
        content = 'print("Hello from a regular directory")'
        os.makedirs(os.path.join(self.test_dir, "some_test_dir"))
        self.create_file(os.path.join("some_test_dir", "sample.py"), content)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)  # Should count normally

    def test_exclude_directory_name(self):
        content = 'print("Hello from an excluded directory")'
        os.makedirs(os.path.join(self.test_dir, "dir_test"))
        self.create_file(os.path.join("dir_test", "sample.py"), content)
        loc = count_loc(self.test_dir)
        # Shouldn't count the file in dir_test due to the exclusion pattern.
        self.assertEqual(loc, 0)

    def test_verbose_output(self):
        content = 'print("Hello")'
        self.create_file("sample.py", content)
        # Capture stdout to check the verbose banner; restore it even if
        # count_loc raises (the original leaked the redirect on failure).
        original_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            count_loc(self.test_dir, verbose=1)
            output = sys.stdout.getvalue()
        finally:
            sys.stdout = original_stdout
        self.assertIn("Count LoCs in", output)

    def test_multiline_string_same_line(self):
        content = '''"""This is a multiline string ending on the same line"""
print("Outside string")'''
        self.create_file("same_line_multiline.py", content)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)  # Only the print statement should count

    def test_multiline_string_ends_on_same_line(self):
        content = '"""a multiline string end on same line"""\nprint("Outstr")'
        self.create_file("same_line_multiline.py", content)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 1)  # Only the print statement should count

    def test_multiline_string_ends_in_middle_of_line(self):
        content = '''print("Start")
"""This is a multiline string ending in the middle of a line"""
"""This is another multiline string."""
print("End")'''
        self.create_file("multiline_in_middle.py", content)
        loc = count_loc(self.test_dir)
        self.assertEqual(loc, 2)  # Both print statements should count

    def test_line_starting_with_triple_quotes_not_ending(self):
        content = '"""\nThis is a multiline string\n'
        self.create_file("test_file_2.py", content)
        path = os.path.join(self.test_dir, "test_file_2.py")
        # Because it's part of a multiline string.
        self.assertEqual(count_loc(path), 0)

    def test_line_starting_and_ending_with_triple_quotes(self):
        content = '"""This is a one-liner docstring."""\n'
        self.create_file("test_file_3.py", content)
        path = os.path.join(self.test_dir, "test_file_3.py")
        # This is still considered a comment/docstring.
        self.assertEqual(count_loc(path), 0)

    def test_string_open_true_line_starting_with_triple_quotes(self):
        content = '"""\nEnd of the multiline string."""\n'
        self.create_file("test_file_4.py", content)
        path = os.path.join(self.test_dir, "test_file_4.py")
        # Entire content is a multiline string/comment.
        self.assertEqual(count_loc(path), 0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/image_utils.py | keras/src/utils/image_utils.py | """Utilities related to image handling."""
import io
import pathlib
import warnings
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
try:
    from PIL import Image as pil_image

    try:
        # Newer Pillow versions expose resampling filters on the
        # `Image.Resampling` enum.
        pil_image_resampling = pil_image.Resampling
    except AttributeError:
        # Older Pillow exposes the filters directly on the Image module.
        pil_image_resampling = pil_image
except ImportError:
    # Pillow is optional: leave both names as None so callers can raise a
    # helpful ImportError only when PIL functionality is actually used.
    pil_image = None
    pil_image_resampling = None


if pil_image_resampling is not None:
    # Map user-facing interpolation names to PIL resampling filters.
    PIL_INTERPOLATION_METHODS = {
        "nearest": pil_image_resampling.NEAREST,
        "bilinear": pil_image_resampling.BILINEAR,
        "bicubic": pil_image_resampling.BICUBIC,
        "hamming": pil_image_resampling.HAMMING,
        "box": pil_image_resampling.BOX,
        "lanczos": pil_image_resampling.LANCZOS,
    }
@keras_export(
    [
        "keras.utils.array_to_img",
        "keras.preprocessing.image.array_to_img",
    ]
)
def array_to_img(x, data_format=None, scale=True, dtype=None):
    """Converts a 3D NumPy array to a PIL Image instance.

    Example:

    ```python
    from PIL import Image
    img = np.random.random(size=(100, 100, 3))
    pil_img = keras.utils.array_to_img(img)
    ```

    Args:
        x: Input data, in any form that can be converted to a NumPy array.
        data_format: Image data format, can be either `"channels_first"` or
            `"channels_last"`. Defaults to `None`, in which case the global
            setting `keras.backend.image_data_format()` is used (unless you
            changed it, it defaults to `"channels_last"`).
        scale: Whether to rescale the image such that minimum and maximum
            values are 0 and 255 respectively. Defaults to `True`.
        dtype: Dtype to use. `None` means the global setting
            `keras.backend.floatx()` is used (unless you changed it, it
            defaults to `"float32"`). Defaults to `None`.

    Returns:
        A PIL Image instance.
    """
    data_format = backend.standardize_data_format(data_format)
    if dtype is None:
        dtype = backend.floatx()
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. "
            "The use of `array_to_img` requires PIL."
        )
    x = np.asarray(x, dtype=dtype)
    if x.ndim != 3:
        raise ValueError(
            "Expected image array to have rank 3 (single image). "
            f"Got array with shape: {x.shape}"
        )

    # PIL expects (height, width, channel), so move the channel axis last
    # when the input is channels-first.
    if data_format == "channels_first":
        x = x.transpose(1, 2, 0)

    if scale:
        # Shift values to start at 0, then rescale into [0, 255].
        x = x - np.min(x)
        x_max = np.max(x)
        if x_max != 0:
            x /= x_max
        x *= 255

    n_channels = x.shape[2]
    if n_channels == 4:
        # RGBA
        return pil_image.fromarray(x.astype("uint8"), "RGBA")
    if n_channels == 3:
        # RGB
        return pil_image.fromarray(x.astype("uint8"), "RGB")
    if n_channels == 1:
        if np.max(x) > 255:
            # 32-bit signed integer grayscale image. PIL mode "I"
            return pil_image.fromarray(x[:, :, 0].astype("int32"), "I")
        # 8-bit grayscale. PIL mode "L"
        return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
    raise ValueError(f"Unsupported channel number: {x.shape[2]}")
@keras_export(
    [
        "keras.utils.img_to_array",
        "keras.preprocessing.image.img_to_array",
    ]
)
def img_to_array(img, data_format=None, dtype=None):
    """Converts a PIL Image instance to a NumPy array.

    Example:

    ```python
    from PIL import Image
    img_data = np.random.random(size=(100, 100, 3))
    img = keras.utils.array_to_img(img_data)
    array = keras.utils.image.img_to_array(img)
    ```

    Args:
        img: Input PIL Image instance.
        data_format: Image data format, can be either `"channels_first"` or
            `"channels_last"`. Defaults to `None`, in which case the global
            setting `keras.backend.image_data_format()` is used (unless you
            changed it, it defaults to `"channels_last"`).
        dtype: Dtype to use. `None` means the global setting
            `keras.backend.floatx()` is used (unless you changed it, it
            defaults to `"float32"`).

    Returns:
        A 3D NumPy array.
    """
    data_format = backend.standardize_data_format(data_format)
    if dtype is None:
        dtype = backend.floatx()
    # PIL images convert to (height, width, channel) arrays; rearrange when
    # the caller wants channels first.
    x = np.asarray(img, dtype=dtype)
    rank = x.ndim
    if rank == 3:
        if data_format == "channels_first":
            x = np.transpose(x, (2, 0, 1))
    elif rank == 2:
        # Grayscale (height, width): add a singleton channel axis.
        if data_format == "channels_first":
            x = x[np.newaxis, :, :]
        else:
            x = x[:, :, np.newaxis]
    else:
        raise ValueError(f"Unsupported image shape: {x.shape}")
    return x
@keras_export(["keras.utils.save_img", "keras.preprocessing.image.save_img"])
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
    """Saves an image stored as a NumPy array to a path or file object.

    Args:
        path: Path or file object.
        x: NumPy array.
        data_format: Image data format, either `"channels_first"` or
            `"channels_last"`.
        file_format: Optional file format override. If omitted, the format to
            use is determined from the filename extension. If a file object was
            used instead of a filename, this parameter should always be used.
        scale: Whether to rescale image values to be within `[0, 255]`.
        **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
    """
    data_format = backend.standardize_data_format(data_format)
    # Infer format from path if not explicitly provided
    if file_format is None and isinstance(path, (str, pathlib.Path)):
        file_format = pathlib.Path(path).suffix[1:].lower()
    if file_format:
        # Bug fix: normalize case once, so an explicit `file_format="JPEG"`
        # or `"JPG"` behaves the same as an inferred lowercase extension.
        # PIL itself treats the format name case-insensitively.
        file_format = file_format.lower()
        # Normalize jpg -> jpeg for Pillow compatibility
        if file_format == "jpg":
            file_format = "jpeg"
    img = array_to_img(x, data_format=data_format, scale=scale)
    # JPEG has no alpha channel; convert rather than letting PIL error out.
    if img.mode == "RGBA" and file_format == "jpeg":
        warnings.warn(
            "The JPEG format does not support RGBA images, converting to RGB."
        )
        img = img.convert("RGB")
    img.save(path, format=file_format, **kwargs)
@keras_export(["keras.utils.load_img", "keras.preprocessing.image.load_img"])
def load_img(
    path,
    color_mode="rgb",
    target_size=None,
    interpolation="nearest",
    keep_aspect_ratio=False,
):
    """Loads an image into PIL format.

    Example:

    ```python
    image = keras.utils.load_img(image_path)
    input_arr = keras.utils.img_to_array(image)
    input_arr = np.array([input_arr])  # Convert single image to a batch.
    predictions = model.predict(input_arr)
    ```

    Args:
        path: Path to image file.
        color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. Default: `"rgb"`.
            The desired image format.
        target_size: Either `None` (default to original size) or tuple of ints
            `(img_height, img_width)`.
        interpolation: Interpolation method used to resample the image if the
            target size is different from that of the loaded image. Supported
            methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
            If PIL version 1.1.3 or newer is installed, `"lanczos"`
            is also supported. If PIL version 3.4.0 or newer is installed,
            `"box"` and `"hamming"` are also
            supported. By default, `"nearest"` is used.
        keep_aspect_ratio: Boolean, whether to resize images to a target
            size without aspect ratio distortion. The image is cropped in
            the center with target aspect ratio before resizing.

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: If PIL is not installed.
        TypeError: If `path` is neither path-like nor an `io.BytesIO`.
        ValueError: If `color_mode` or `interpolation` is invalid.
    """
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. The use of `load_img` requires PIL."
        )
    if isinstance(path, io.BytesIO):
        img = pil_image.open(path)
    elif isinstance(path, (pathlib.Path, bytes, str)):
        if isinstance(path, pathlib.Path):
            path = str(path.resolve())
        # Read the whole file into memory so the OS file handle can be
        # closed immediately (PIL opens files lazily otherwise).
        with open(path, "rb") as f:
            img = pil_image.open(io.BytesIO(f.read()))
    else:
        raise TypeError(
            f"path should be path-like or io.BytesIO, not {type(path)}"
        )
    if color_mode == "grayscale":
        # if image is not already an 8-bit, 16-bit or 32-bit grayscale image
        # convert it to an 8-bit grayscale image.
        if img.mode not in ("L", "I;16", "I"):
            img = img.convert("L")
    elif color_mode == "rgba":
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif color_mode == "rgb":
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
    if target_size is not None:
        # `target_size` is (height, width) but PIL sizes are (width, height).
        width_height_tuple = (target_size[1], target_size[0])
        if img.size != width_height_tuple:
            if interpolation not in PIL_INTERPOLATION_METHODS:
                raise ValueError(
                    "Invalid interpolation method {} specified. Supported "
                    "methods are {}".format(
                        interpolation,
                        ", ".join(PIL_INTERPOLATION_METHODS.keys()),
                    )
                )
            resample = PIL_INTERPOLATION_METHODS[interpolation]
            if keep_aspect_ratio:
                width, height = img.size
                target_width, target_height = width_height_tuple
                # Candidate crop sizes: the largest crop with the target
                # aspect ratio, assuming the other dimension is kept whole.
                crop_height = (width * target_height) // target_width
                crop_width = (height * target_width) // target_height
                # Set back to input height / width
                # if crop_height / crop_width is not smaller.
                crop_height = min(height, crop_height)
                crop_width = min(width, crop_width)
                # Center the crop box inside the original image.
                crop_box_hstart = (height - crop_height) // 2
                crop_box_wstart = (width - crop_width) // 2
                crop_box_wend = crop_box_wstart + crop_width
                crop_box_hend = crop_box_hstart + crop_height
                # PIL crop boxes are (left, upper, right, lower).
                crop_box = [
                    crop_box_wstart,
                    crop_box_hstart,
                    crop_box_wend,
                    crop_box_hend,
                ]
                img = img.resize(width_height_tuple, resample, box=crop_box)
            else:
                img = img.resize(width_height_tuple, resample)
    return img
@keras_export("keras.preprocessing.image.smart_resize")
def smart_resize(
    x,
    size,
    interpolation="bilinear",
    data_format="channels_last",
    backend_module=None,
):
    """Resize images to a target size without aspect ratio distortion.

    Image datasets typically yield images that have each a different
    size. However, these images need to be batched before they can be
    processed by Keras layers. To be batched, images need to share the same
    height and width.

    You could simply do, in TF (or JAX equivalent):

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: resize(img, size))
    ```

    However, if you do this, you distort the aspect ratio of your images, since
    in general they do not all have the same aspect ratio as `size`. This is
    fine in many cases, but not always (e.g. for image generation models
    this can be a problem).

    Note that passing the argument `preserve_aspect_ratio=True` to `resize`
    will preserve the aspect ratio, but at the cost of no longer respecting the
    provided target size.

    This calls for:

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: smart_resize(img, size))
    ```

    Your output images will actually be `(200, 200)`, and will not be distorted.
    Instead, the parts of the image that do not fit within the target size
    get cropped out.

    The resizing process is:

    1. Take the largest centered crop of the image that has the same aspect
    ratio as the target size. For instance, if `size=(200, 200)` and the input
    image has size `(340, 500)`, we take a crop of `(340, 340)` centered along
    the width.
    2. Resize the cropped image to the target size. In the example above,
    we resize the `(340, 340)` crop to `(200, 200)`.

    Args:
        x: Input image or batch of images (as a tensor or NumPy array).
            Must be in format `(height, width, channels)`
            or `(batch_size, height, width, channels)`.
        size: Tuple of `(height, width)` integer. Target size.
        interpolation: String, interpolation to use for resizing.
            Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
            `"lanczos3"`, `"lanczos5"`.
            Defaults to `"bilinear"`.
        data_format: `"channels_last"` or `"channels_first"`.
        backend_module: Backend module to use (if different from the default
            backend).

    Returns:
        Array with shape `(size[0], size[1], channels)`.
        If the input image was a NumPy array, the output is a NumPy array,
        and if it was a backend-native tensor,
        the output is a backend-native tensor.
    """
    backend_module = backend_module or backend
    if len(size) != 2:
        raise ValueError(
            f"Expected `size` to be a tuple of 2 integers, but got: {size}."
        )
    img = backend_module.convert_to_tensor(x)
    # Bug fix: the previous guard `if len(img.shape) is not None:` was always
    # true (`len()` returns an int, never `None`), so the rank check now runs
    # unconditionally. Behavior is unchanged; the dead condition is removed.
    if len(img.shape) < 3 or len(img.shape) > 4:
        raise ValueError(
            "Expected an image array with shape `(height, width, "
            "channels)`, or `(batch_size, height, width, channels)`, but "
            f"got input with incorrect rank, of shape {img.shape}."
        )
    shape = backend_module.shape(img)
    if data_format == "channels_last":
        height, width = shape[-3], shape[-2]
    else:
        height, width = shape[-2], shape[-1]
    target_height, target_width = size
    # Set back to input height / width if crop_height / crop_width is not
    # smaller.
    if isinstance(height, int) and isinstance(width, int):
        # For JAX, we need to keep the slice indices as static integers
        crop_height = int(float(width * target_height) / target_width)
        crop_height = max(min(height, crop_height), 1)
        crop_width = int(float(height * target_width) / target_height)
        crop_width = max(min(width, crop_width), 1)
        crop_box_hstart = int(float(height - crop_height) / 2)
        crop_box_wstart = int(float(width - crop_width) / 2)
    else:
        # Dynamic shapes: compute the same crop geometry with backend ops.
        crop_height = backend_module.cast(
            backend_module.cast(width * target_height, "float32")
            / target_width,
            "int32",
        )
        crop_height = backend_module.numpy.minimum(height, crop_height)
        crop_height = backend_module.numpy.maximum(crop_height, 1)
        crop_height = backend_module.cast(crop_height, "int32")
        crop_width = backend_module.cast(
            backend_module.cast(height * target_width, "float32")
            / target_height,
            "int32",
        )
        crop_width = backend_module.numpy.minimum(width, crop_width)
        crop_width = backend_module.numpy.maximum(crop_width, 1)
        crop_width = backend_module.cast(crop_width, "int32")
        crop_box_hstart = backend_module.cast(
            backend_module.cast(height - crop_height, "float32") / 2, "int32"
        )
        crop_box_wstart = backend_module.cast(
            backend_module.cast(width - crop_width, "float32") / 2, "int32"
        )
    # Take the centered crop, handling batched and unbatched inputs in both
    # data formats.
    if data_format == "channels_last":
        if len(img.shape) == 4:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
        else:
            img = img[
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
    else:
        if len(img.shape) == 4:
            img = img[
                :,
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]
        else:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]
    img = backend_module.image.resize(
        img, size=size, interpolation=interpolation, data_format=data_format
    )
    if isinstance(x, np.ndarray):
        # Preserve the input container type: NumPy in -> NumPy out.
        return np.array(img)
    return img
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/module_utils.py | keras/src/utils/module_utils.py | import importlib
class LazyModule:
    """Proxy object that defers importing a module until first use.

    Attribute access on an instance triggers the real import; if the module
    is missing, an `ImportError` with an installation hint is raised.
    """

    def __init__(self, name, pip_name=None, import_error_msg=None):
        self.name = name
        self.pip_name = pip_name or name
        self.import_error_msg = import_error_msg or (
            f"This requires the {self.name} module. "
            f"You can install it via `pip install {self.pip_name}`"
        )
        self.module = None
        self._available = None

    @property
    def available(self):
        """Whether the underlying module can be imported (cached)."""
        if self._available is None:
            try:
                self.initialize()
            except ImportError:
                self._available = False
            else:
                self._available = True
        return self._available

    def initialize(self):
        # Perform the actual import, translating failure into a message that
        # tells the user how to install the dependency.
        try:
            self.module = importlib.import_module(self.name)
        except ImportError:
            raise ImportError(self.import_error_msg)

    def __getattr__(self, name):
        # Keep API-export machinery from forcing an import of the module.
        if name == "_api_export_path":
            raise AttributeError
        if self.module is None:
            self.initialize()
        return getattr(self.module, name)

    def __repr__(self):
        return f"LazyModule({self.name})"
class OrbaxLazyModule(LazyModule):
    """Lazy loader whose import target is the `v1` attribute of
    `orbax.checkpoint` rather than a plain importable module path."""

    def initialize(self):
        # The public API lives on the parent package's `v1` attribute, so a
        # plain `import_module(self.name)` would not work here.
        try:
            self.module = importlib.import_module("orbax.checkpoint").v1
        except ImportError:
            raise ImportError(self.import_error_msg)
# Lazily-imported heavy dependencies. Accessing any attribute on these
# objects triggers the real import (see `LazyModule`), so importing this
# module stays cheap even when the packages are not installed.
tensorflow = LazyModule("tensorflow")
# `tf.io.gfile` ships inside the `tensorflow` pip package.
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
# torch_xla commonly needs extra environment setup, hence the longer
# custom error message with LD_LIBRARY_PATH guidance.
torch_xla = LazyModule(
    "torch_xla",
    import_error_msg=(
        "This requires the torch_xla module. You can install it via "
        "`pip install torch-xla`. Additionally, you may need to update "
        "LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
        "_XLAC.so, which needs to link to the version of Python it was built "
        "with. Use the following command to update LD_LIBRARY_PATH: "
        "`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
    ),
)
optree = LazyModule("optree")
# NOTE(review): module name "tree" with alias `dmtree` — presumably the
# dm-tree package; confirm the intended pip name.
dmtree = LazyModule("tree")
tf2onnx = LazyModule("tf2onnx")
grain = LazyModule("grain")
litert = LazyModule("ai_edge_litert")
# Orbax exposes its public API on `orbax.checkpoint.v1`, which requires the
# custom import logic in `OrbaxLazyModule`.
ocp = OrbaxLazyModule(
    "orbax.checkpoint.v1",
    pip_name="orbax-checkpoint",
    import_error_msg=(
        "OrbaxCheckpoint requires the 'orbax-checkpoint' package. "
        "You can install it via pip install orbax-checkpoint"
    ),
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/code_stats.py | keras/src/utils/code_stats.py | import os
def count_loc(directory, exclude=("_test",), extensions=(".py",), verbose=0):
    """Counts lines of code under `directory`, recursively.

    Blank lines, `#` comment lines, and `\"\"\"`-delimited docstrings are
    not counted.

    Args:
        directory: Root directory to walk.
        exclude: Tuple of suffixes. Directories ending with one of these,
            and files whose stem ends with one of these (e.g. `foo_test.py`),
            are skipped.
        extensions: Tuple of file extensions to include.
        verbose: If truthy, print each counted file.

    Returns:
        Integer count of lines of code.
    """
    loc = 0
    for root, _, fnames in os.walk(directory):
        if any(root.endswith(ex) for ex in exclude):
            continue
        for fname in fnames:
            # Bug fix: match *any* of the extensions. The previous loop broke
            # on the first non-matching extension, so with multiple
            # extensions only the first could ever match.
            if not fname.endswith(tuple(extensions)):
                continue
            # Bug fix: check every (exclude, extension) combination instead
            # of only the extension left over from the previous loop.
            if any(
                fname.endswith(ex + ext)
                for ex in exclude
                for ext in extensions
            ):
                continue
            path = os.path.join(root, fname)
            if verbose:
                print(f"Count LoCs in {path}")
            with open(path) as f:
                lines = f.read().split("\n")
            in_docstring = False
            for line in lines:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                if in_docstring:
                    if line.startswith('"""'):
                        in_docstring = False
                else:
                    if line.startswith('"""'):
                        # Bug fix: a lone `"""` both starts and ends with the
                        # delimiter, so it used to be treated as a closed
                        # one-line docstring and the docstring body was
                        # counted as code. A one-line docstring needs two
                        # distinct delimiters (length >= 6).
                        if not (line.endswith('"""') and len(line) >= 6):
                            in_docstring = True
                    else:
                        loc += 1
    return loc
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/backend_utils_test.py | keras/src/utils/backend_utils_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import backend_utils
class BackendUtilsTest(testing.TestCase):
    """Tests for `backend_utils.DynamicBackend` backend switching."""

    @parameterized.named_parameters(
        ("numpy", "numpy"),
        ("jax", "jax"),
        ("tensorflow", "tensorflow"),
        ("torch", "torch"),
    )
    def test_dynamic_backend(self, name):
        # Run an op through DynamicBackend and check the result has the
        # native tensor type of the selected backend.
        dynamic_backend = backend_utils.DynamicBackend()
        x = np.random.uniform(size=[1, 2, 3]).astype("float32")
        if name == "numpy":
            dynamic_backend.set_backend(name)
            # Dynamically switching *to* numpy from another session backend
            # is not supported and should raise.
            if backend.backend() != "numpy":
                with self.assertRaisesRegex(
                    NotImplementedError,
                    "Currently, we cannot dynamically import the numpy backend",
                ):
                    y = dynamic_backend.numpy.log10(x)
            else:
                y = dynamic_backend.numpy.log10(x)
                self.assertIsInstance(y, np.ndarray)
        elif name == "jax":
            import jax

            dynamic_backend.set_backend(name)
            y = dynamic_backend.numpy.log10(x)
            self.assertIsInstance(y, jax.Array)
        elif name == "tensorflow":
            import tensorflow as tf

            dynamic_backend.set_backend(name)
            y = dynamic_backend.numpy.log10(x)
            self.assertIsInstance(y, tf.Tensor)
        elif name == "torch":
            import torch

            dynamic_backend.set_backend(name)
            y = dynamic_backend.numpy.log10(x)
            self.assertIsInstance(y, torch.Tensor)

    def test_dynamic_backend_invalid_name(self):
        # Unknown backend names must be rejected with a helpful message.
        dynamic_backend = backend_utils.DynamicBackend()
        with self.assertRaisesRegex(ValueError, "Available backends are"):
            dynamic_backend.set_backend("abc")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/sequence_utils.py | keras/src/utils/sequence_utils.py | import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
    sequences,
    maxlen=None,
    dtype="int32",
    padding="pre",
    truncating="pre",
    value=0.0,
):
    """Pads sequences to the same length.

    This function transforms a list (of length `num_samples`)
    of sequences (lists of integers)
    into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided,
    or the length of the longest sequence in the list.

    Sequences that are shorter than `num_timesteps`
    are padded with `value` until they are `num_timesteps` long.

    Sequences longer than `num_timesteps` are truncated
    so that they fit the desired length.

    The position where padding or truncation happens is determined by
    the arguments `padding` and `truncating`, respectively.
    Pre-padding or removing values from the beginning of the sequence is the
    default.

    >>> sequence = [[1], [2, 3], [4, 5, 6]]
    >>> keras.utils.pad_sequences(sequence)
    array([[0, 0, 1],
           [0, 2, 3],
           [4, 5, 6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, value=-1)
    array([[-1, -1,  1],
           [-1,  2,  3],
           [ 4,  5,  6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, padding='post')
    array([[1, 0, 0],
           [2, 3, 0],
           [4, 5, 6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, maxlen=2)
    array([[0, 1],
           [2, 3],
           [5, 6]], dtype=int32)

    Args:
        sequences: List of sequences (each sequence is a list of integers).
        maxlen: Optional Int, maximum length of all sequences. If not provided,
            sequences will be padded to the length of the longest individual
            sequence.
        dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
            To pad sequences with variable length strings, you can use `object`.
        padding: String, "pre" or "post" (optional, defaults to `"pre"`):
            pad either before or after each sequence.
        truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value. (Optional, defaults to `0.`)

    Returns:
        NumPy array with shape `(len(sequences), maxlen)`
    """
    if not hasattr(sequences, "__len__"):
        raise ValueError("`sequences` must be iterable.")
    num_samples = len(sequences)

    lengths = []
    sample_shape = ()
    flag = True
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    for x in sequences:
        try:
            lengths.append(len(x))
            if flag and len(x):
                sample_shape = np.asarray(x).shape[1:]
                flag = False
        except TypeError as e:
            raise ValueError(
                "`sequences` must be a list of iterables. "
                f"Found non-iterable: {str(x)}"
            ) from e

    if maxlen is None:
        # Robustness fix: `np.max` raises on an empty list; an empty input
        # now yields an empty (0, 0)-shaped result instead of crashing.
        maxlen = np.max(lengths) if lengths else 0

    # Bug fix: this used to be `np.issubdtype(dtype, np.str_) or
    # np.issubdtype(dtype, np.str_)` — the second operand was a duplicate
    # of the first (dead code), so a single check suffices.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype is not object and not is_dtype_str:
        raise ValueError(
            f"`dtype` {dtype} is not compatible with `value`'s type: "
            f"{type(value)}\nYou should set `dtype=object` for variable length "
            "strings."
        )

    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == "pre":
            trunc = s[-maxlen:]
        elif truncating == "post":
            trunc = s[:maxlen]
        else:
            raise ValueError(f'Truncating type "{truncating}" not understood')

        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError(
                f"Shape of sample {trunc.shape[1:]} of sequence at "
                f"position {idx} is different from expected shape "
                f"{sample_shape}"
            )

        if padding == "post":
            x[idx, : len(trunc)] = trunc
        elif padding == "pre":
            x[idx, -len(trunc) :] = trunc
        else:
            raise ValueError(f'Padding type "{padding}" not understood')
    return x
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/text_dataset_utils_test.py | keras/src/utils/text_dataset_utils_test.py | import os
import random
import string
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import text_dataset_utils
class TextDatasetFromDirectoryTest(testing.TestCase):
    """Tests for `text_dataset_utils.text_dataset_from_directory` across the
    "tf" and "grain" dataset formats."""

    def _prepare_directory(
        self, num_classes=2, nested_dirs=False, count=16, length=20
    ):
        # Get a unique temp directory
        temp_dir = self.get_temp_dir()

        # Generate paths to class subdirectories
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths

        for i in range(count):
            path = paths[i % len(paths)]
            filename = os.path.join(path, f"text_{i}.txt")
            with open(os.path.join(temp_dir, filename), "w") as f:
                text = "".join(
                    [random.choice(string.printable) for _ in range(length)]
                )
                f.write(text)
        return temp_dir

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_standalone(self, format):
        # Test retrieving txt files without labels from a directory and its
        # subdirs. Save a few extra files in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i in range(3):
            filename = f"text_{i}.txt"
            with open(os.path.join(directory, filename), "w") as f:
                text = "".join(
                    [random.choice(string.printable) for _ in range(20)]
                )
                f.write(text)

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=5,
            label_mode=None,
            max_length=10,
            format=format,
        )
        batch = next(iter(dataset))
        # We just return the texts, no labels
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(list(batch.shape), [5])
            self.assertDType(batch, "string")
        else:
            self.assertLen(batch, 5)
            self.assertIsInstance(batch[0], str)
        # Count samples
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += len(batch)
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    # Bug fix: this parameter previously read `format=format`, which bound
    # the *builtin* `format` function as the default value. The parameterized
    # decorator always supplies the argument, so the default was dead — but
    # shadowing a builtin as a default is a latent footgun.
    def test_text_dataset_from_directory_binary(self, format):
        directory = self._prepare_directory(num_classes=2)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=8,
            label_mode="int",
            max_length=10,
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(batch[0].shape, (8,))
            self.assertDType(batch[0], "string")
            self.assertEqual(len(batch[0].numpy()[0]), 10)  # Test max_length
        else:
            self.assertLen(batch[0], 8)
            self.assertIsInstance(batch[0][0], str)
            self.assertLen(batch[0][0], 10)  # Test max_length
        self.assertEqual(list(batch[1].shape), [8])
        self.assertDType(batch[1], "int32")

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=8,
            label_mode="binary",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(list(batch[0].shape), [8])
            self.assertEqual(batch[0].dtype.name, "string")
        else:
            self.assertLen(batch[0], 8)
            self.assertIsInstance(batch[0][0], str)
        self.assertEqual(list(batch[1].shape), [8, 1])
        self.assertDType(batch[1], "float32")

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=8,
            label_mode="categorical",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(list(batch[0].shape), [8])
            self.assertEqual(batch[0].dtype.name, "string")
        else:
            self.assertLen(batch[0], 8)
            self.assertIsInstance(batch[0][0], str)
        self.assertEqual(list(batch[1].shape), [8, 2])
        self.assertDType(batch[1], "float32")

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_sample_count(self, format):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None, format=format
        )
        sample_count = 0
        for batch in dataset:
            sample_count += len(batch)
        self.assertEqual(sample_count, 15)

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_multiclass(self, format):
        directory = self._prepare_directory(num_classes=4, count=15)

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None, format=format
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 8)

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None, format=format
        )
        sample_count = 0
        # Cleanup: count by direct iteration; the previous version consumed
        # a second, parallel iterator via `next()` for no benefit.
        for batch in dataset:
            sample_count += len(batch)
        self.assertEqual(sample_count, 15)

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="int", format=format
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(list(batch[0].shape), [8])
            self.assertEqual(batch[0].dtype.name, "string")
        else:
            self.assertLen(batch[0], 8)
            self.assertIsInstance(batch[0][0], str)
        self.assertEqual(list(batch[1].shape), [8])
        self.assertDType(batch[1], "int32")

        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="categorical", format=format
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if format == "tf" or backend.backend() == "tensorflow":
            self.assertEqual(list(batch[0].shape), [8])
            self.assertEqual(batch[0].dtype.name, "string")
        else:
            self.assertLen(batch[0], 8)
            self.assertIsInstance(batch[0][0], str)
        self.assertEqual(list(batch[1].shape), [8, 4])
        self.assertDType(batch[1], "float32")

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_validation_split(self, format):
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="training",
            seed=1337,
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertLen(batch[0], 8)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="validation",
            seed=1337,
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertLen(batch[0], 2)
        (
            train_dataset,
            val_dataset,
        ) = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="both",
            seed=1337,
            format=format,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertLen(batch[0], 8)
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertLen(batch[0], 2)

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_manual_labels(self, format):
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, labels=[0, 1], shuffle=False, format=format
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertAllClose(batch[1], [0, 1])

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_follow_links(self, format):
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=8,
            label_mode=None,
            follow_links=True,
            format=format,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += len(batch)
        self.assertEqual(sample_count, 25)

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_no_files(self, format):
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(ValueError, "No text files found"):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, format=format
            )

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_errors(self, format):
        directory = self._prepare_directory(num_classes=3, count=5)

        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, labels="other", format=format
            )

        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, label_mode="other", format=format
            )

        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
                format=format,
            )

        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, labels=[0, 0, 1, 1], format=format
            )

        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"], format=format
            )

        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, label_mode="binary", format=format
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, validation_split=2, format=format
            )

        with self.assertRaisesRegex(
            ValueError,
            '`subset` must be either "training", "validation" or "both"',
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory, validation_split=0.2, subset="other", format=format
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory,
                validation_split=0.0,
                subset="training",
                format=format,
            )

        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = text_dataset_utils.text_dataset_from_directory(
                directory,
                validation_split=0.2,
                subset="training",
                format=format,
            )

    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_text_dataset_from_directory_not_batched(self, format):
        directory = self._prepare_directory()
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=None,
            label_mode=None,
            follow_links=True,
            format=format,
        )
        sample = next(iter(dataset))
        if format == "tf":
            self.assertEqual(len(sample.shape), 0)
        else:
            self.assertIsInstance(sample, str)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/dataset_utils.py | keras/src/utils/dataset_utils.py | import os
import random
import time
import warnings
from multiprocessing.pool import ThreadPool
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.utils import file_utils
from keras.src.utils import io_utils
from keras.src.utils.module_utils import grain
@keras_export("keras.utils.split_dataset")
def split_dataset(
    dataset,
    left_size=None,
    right_size=None,
    shuffle=False,
    seed=None,
    preferred_backend=None,
):
    """Splits a dataset into a left half and a right half (e.g. train / test).

    Args:
        dataset:
            A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
            or a list/tuple of arrays with the same length.
        left_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the left dataset. If integer, it
            signifies the number of samples to pack in the left dataset. If
            `None`, defaults to the complement to `right_size`.
            Defaults to `None`.
        right_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the right dataset.
            If integer, it signifies the number of samples to pack
            in the right dataset.
            If `None`, defaults to the complement to `left_size`.
            Defaults to `None`.
        shuffle: Boolean, whether to shuffle the data before splitting it.
        seed: A random seed for shuffling.
        preferred_backend: String, specifying which backend
            (e.g.; "tensorflow", "torch") to use. If `None`, the
            backend is inferred from the type of `dataset` - if
            `dataset` is a `tf.data.Dataset`, "tensorflow" backend
            is used, if `dataset` is a `torch.utils.data.Dataset`,
            "torch" backend is used, and if `dataset` is a list/tuple/np.array
            the current Keras backend is used. Defaults to `None`.

    Returns:
        A tuple of two dataset objects, the left and right splits. The exact
        type of the returned objects depends on the `preferred_backend`.
        For example, with a "tensorflow" backend,
        `tf.data.Dataset` objects are returned. With a "torch" backend,
        `torch.utils.data.Dataset` objects are returned.

    Example:

    >>> data = np.random.random(size=(1000, 4))
    >>> left_ds, right_ds = keras.utils.split_dataset(data, left_size=0.8)
    >>> # For a tf.data.Dataset, you can use .cardinality()
    >>> # >>> int(left_ds.cardinality())
    >>> # 800
    >>> # For a torch.utils.data.Dataset, you can use len()
    >>> # >>> len(left_ds)
    >>> # 800
    """
    # Falsy `preferred_backend` (None/"") triggers inference from the
    # dataset's concrete type.
    preferred_backend = preferred_backend or _infer_preferred_backend(dataset)
    # Everything that is not explicitly torch goes through the TF path.
    splitter = (
        _split_dataset_torch
        if preferred_backend == "torch"
        else _split_dataset_tf
    )
    return splitter(
        dataset,
        left_size=left_size,
        right_size=right_size,
        shuffle=shuffle,
        seed=seed,
    )
def _split_dataset_tf(
    dataset, left_size=None, right_size=None, shuffle=False, seed=None
):
    """Splits a dataset into a left half and a right half (e.g. train / test).

    Args:
        dataset:
            A `tf.data.Dataset` object,
            or a list/tuple of arrays with the same length.
        left_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the left dataset. If integer, it
            signifies the number of samples to pack in the left dataset. If
            `None`, defaults to the complement to `right_size`.
            Defaults to `None`.
        right_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the right dataset.
            If integer, it signifies the number of samples to pack
            in the right dataset.
            If `None`, defaults to the complement to `left_size`.
            Defaults to `None`.
        shuffle: Boolean, whether to shuffle the data before splitting it.
        seed: A random seed for shuffling.

    Returns:
        A tuple of two `tf.data.Dataset` objects:
        the left and right splits.

    Raises:
        TypeError: If `dataset` is not one of the supported types.
        ValueError: If both `left_size` and `right_size` are `None`.
    """
    from keras.src.utils.module_utils import tensorflow as tf

    dataset_type_spec = _get_type_spec(dataset)
    if dataset_type_spec is None:
        raise TypeError(
            "The `dataset` argument must be either"
            "a `tf.data.Dataset` object, or"
            "a list/tuple of arrays. "
            f"Received: dataset={dataset} of type {type(dataset)}"
        )

    if right_size is None and left_size is None:
        raise ValueError(
            "At least one of the `left_size` or `right_size` "
            "must be specified. Received: left_size=None and "
            "right_size=None"
        )

    # Materialize the whole dataset as an in-memory list of samples so it
    # can be shuffled and sliced by index (this utility targets small
    # datasets; see the warning in `_get_next_sample`).
    dataset_as_list = _convert_dataset_to_list(dataset, dataset_type_spec)

    if shuffle:
        if seed is None:
            seed = random.randint(0, int(1e6))
        # NOTE: this seeds the module-level `random` RNG as a side effect.
        random.seed(seed)
        random.shuffle(dataset_as_list)

    total_length = len(dataset_as_list)
    # Normalize fractional / integer sizes into two integer sample counts
    # that sum to at most `total_length`.
    left_size, right_size = _rescale_dataset_split_sizes(
        left_size, right_size, total_length
    )
    left_split = list(dataset_as_list[:left_size])
    right_split = list(dataset_as_list[-right_size:])

    # Re-pack the flat sample lists into the original element structure.
    left_split = _restore_dataset_from_list(
        left_split, dataset_type_spec, dataset
    )
    right_split = _restore_dataset_from_list(
        right_split, dataset_type_spec, dataset
    )

    left_split = tf.data.Dataset.from_tensor_slices(left_split)
    right_split = tf.data.Dataset.from_tensor_slices(right_split)

    # apply batching to the splits if the dataset is batched
    if dataset_type_spec is tf.data.Dataset and is_batched(dataset):
        batch_size = get_batch_size(dataset)
        if batch_size is not None:
            left_split = left_split.batch(batch_size)
            right_split = right_split.batch(batch_size)

    left_split = left_split.prefetch(tf.data.AUTOTUNE)
    right_split = right_split.prefetch(tf.data.AUTOTUNE)
    return left_split, right_split
def _split_dataset_torch(
    dataset, left_size=None, right_size=None, shuffle=False, seed=None
):
    """Splits a dataset into a left half and a right half (e.g. train / test).

    Args:
        dataset:
            A `torch.utils.data.Dataset` object,
            or a list/tuple of arrays with the same length.
        left_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the left dataset. If integer, it
            signifies the number of samples to pack in the left dataset. If
            `None`, defaults to the complement to `right_size`.
            Defaults to `None`.
        right_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the right dataset.
            If integer, it signifies the number of samples to pack
            in the right dataset.
            If `None`, defaults to the complement to `left_size`.
            Defaults to `None`.
        shuffle: Boolean, whether to shuffle the data before splitting it.
        seed: A random seed for shuffling.

    Returns:
        A tuple of two `torch.utils.data.Dataset` objects:
        the left and right splits.

    Raises:
        TypeError: If `dataset` is not one of the supported types.
        ValueError: If both `left_size` and `right_size` are `None`.
    """
    import torch
    from torch.utils.data import TensorDataset
    from torch.utils.data import random_split

    dataset_type_spec = _get_type_spec(dataset)
    if dataset_type_spec is None:
        raise TypeError(
            "The `dataset` argument must be a `torch.utils.data.Dataset`"
            " object, or a list/tuple of arrays."
            f" Received: dataset={dataset} of type {type(dataset)}"
        )

    # Convert non-torch inputs into a `TensorDataset` so that
    # `random_split` below can operate on them uniformly.
    if not isinstance(dataset, torch.utils.data.Dataset):
        if dataset_type_spec is np.ndarray:
            dataset = TensorDataset(torch.from_numpy(dataset))
        elif dataset_type_spec in (list, tuple):
            tensors = [torch.from_numpy(x) for x in dataset]
            dataset = TensorDataset(*tensors)
        elif is_tf_dataset(dataset):
            # TF datasets are first materialized to a list of samples,
            # then transposed into one tensor per component.
            dataset_as_list = _convert_dataset_to_list(
                dataset, dataset_type_spec
            )
            tensors = [
                torch.from_numpy(np.array(sample))
                for sample in zip(*dataset_as_list)
            ]
            dataset = TensorDataset(*tensors)

    if right_size is None and left_size is None:
        raise ValueError(
            "At least one of the `left_size` or `right_size` "
            "must be specified. "
            "Received: left_size=None and right_size=None"
        )

    # Calculate total length and rescale split sizes
    total_length = len(dataset)
    left_size, right_size = _rescale_dataset_split_sizes(
        left_size, right_size, total_length
    )

    # Shuffle the dataset if required
    if shuffle:
        generator = torch.Generator()
        if seed is not None:
            generator.manual_seed(seed)
        else:
            generator.seed()
    else:
        generator = None

    left_split, right_split = random_split(
        dataset, [left_size, right_size], generator=generator
    )
    return left_split, right_split
def _infer_preferred_backend(dataset):
    """Pick a backend name based on the concrete type of `dataset`."""
    # Plain in-memory containers follow the currently active Keras backend.
    if isinstance(dataset, (list, tuple, np.ndarray)):
        return backend.backend()
    if is_tf_dataset(dataset):
        return "tensorflow"
    if is_torch_dataset(dataset):
        return "torch"
    raise TypeError(f"Unsupported dataset type: {type(dataset)}")
def _convert_dataset_to_list(
    dataset,
    dataset_type_spec,
    data_size_warning_flag=True,
    ensure_shape_similarity=True,
):
    """Materialize `dataset` as a flat Python list of samples.

    Args:
        dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
            or a list/tuple of arrays.
        dataset_type_spec: the type of the dataset.
        data_size_warning_flag: If set to `True`, a warning will
            be issued if the dataset takes longer than 10 seconds to iterate.
            Defaults to `True`.
        ensure_shape_similarity: If set to `True`, the shape of
            the first sample will be used to validate the shape of rest of the
            samples. Defaults to `True`.

    Returns:
        List: A list of samples.
    """
    sample_iterator = _get_data_iterator_from_dataset(
        dataset, dataset_type_spec
    )
    iteration_start = time.time()
    # `_get_next_sample` performs shape validation and slow-iteration
    # warnings while yielding; materializing it gives the full list.
    return list(
        _get_next_sample(
            sample_iterator,
            ensure_shape_similarity,
            data_size_warning_flag,
            iteration_start,
        )
    )
def _get_data_iterator_from_dataset(dataset, dataset_type_spec):
"""Get the iterator from a dataset.
Args:
dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
or a list/tuple of arrays.
dataset_type_spec: The type of the dataset.
Returns:
iterator: An `iterator` object.
"""
if dataset_type_spec is list:
if len(dataset) == 0:
raise ValueError(
"Received an empty list dataset. "
"Please provide a non-empty list of arrays."
)
expected_shape = None
for i, element in enumerate(dataset):
if not isinstance(element, np.ndarray):
raise ValueError(
"Expected a list of `numpy.ndarray` objects,"
f"Received: {type(element)} at index {i}."
)
if expected_shape is None:
expected_shape = element.shape
elif element.shape[0] != expected_shape[0]:
raise ValueError(
"Received a list of NumPy arrays with different lengths."
f"Mismatch found at index {i}, "
f"Expected shape={expected_shape} "
f"Received shape={np.array(element).shape}."
"Please provide a list of NumPy arrays of the same length."
)
return iter(zip(*dataset))
elif dataset_type_spec is tuple:
if len(dataset) == 0:
raise ValueError(
"Received an empty list dataset."
"Please provide a non-empty tuple of arrays."
)
expected_shape = None
for i, element in enumerate(dataset):
if not isinstance(element, np.ndarray):
raise ValueError(
"Expected a tuple of `numpy.ndarray` objects,"
f"Received: {type(element)} at index {i}."
)
if expected_shape is None:
expected_shape = element.shape
elif element.shape[0] != expected_shape[0]:
raise ValueError(
"Received a tuple of NumPy arrays with different lengths."
f"Mismatch found at index {i}, "
f"Expected shape={expected_shape} "
f"Received shape={np.array(element).shape}."
"Please provide a tuple of NumPy arrays of the same length."
)
return iter(zip(*dataset))
elif is_tf_dataset(dataset):
if is_batched(dataset):
dataset = dataset.unbatch()
return iter(dataset)
elif is_torch_dataset(dataset):
return iter(dataset)
elif dataset_type_spec is np.ndarray:
return iter(dataset)
raise ValueError(f"Invalid dataset_type_spec: {dataset_type_spec}")
def _get_next_sample(
    dataset_iterator,
    ensure_shape_similarity,
    data_size_warning_flag,
    start_time,
):
    """Yield data samples from the `dataset_iterator`.

    Args:
        dataset_iterator: An `iterator` object.
        ensure_shape_similarity: If set to `True`, the shape of
            the first sample will be used to validate the shape of rest of the
            samples. Defaults to `True`.
        data_size_warning_flag: If set to `True`, a warning will
            be issued if the dataset takes longer than 10 seconds to iterate.
            Defaults to `True`.
        start_time (float): the start time of the dataset iteration. this is
            used only if `data_size_warning_flag` is set to true.

    Yields:
        data_sample: The next sample.

    Raises:
        ValueError: If the iterator is empty, or (when
            `ensure_shape_similarity` is on) a sample's shape differs from
            the first sample's shape.
    """
    from keras.src.trainers.data_adapters.data_adapter_utils import (
        is_tensorflow_tensor,
    )
    from keras.src.trainers.data_adapters.data_adapter_utils import (
        is_torch_tensor,
    )

    try:
        dataset_iterator = iter(dataset_iterator)
        first_sample = next(dataset_iterator)
        # Shape checking only makes sense for array-like samples; for
        # anything else (tuples, dicts, strings...) it is disabled.
        if (
            isinstance(first_sample, np.ndarray)
            or is_tensorflow_tensor(first_sample)
            or is_torch_tensor(first_sample)
        ):
            first_sample_shape = np.array(first_sample).shape
        else:
            first_sample_shape = None
            ensure_shape_similarity = False
        yield first_sample
    except StopIteration:
        raise ValueError(
            "Received an empty dataset. Argument `dataset` must "
            "be a non-empty list/tuple of `numpy.ndarray` objects "
            "or `tf.data.Dataset` objects."
        )

    # NOTE(review): `i` restarts at 0 here, so the index reported in the
    # shape-mismatch error below is offset by one relative to the overall
    # sample position (the first sample was already yielded above).
    for i, sample in enumerate(dataset_iterator):
        if ensure_shape_similarity:
            if first_sample_shape != np.array(sample).shape:
                raise ValueError(
                    "All `dataset` samples must have same shape, "
                    f"Expected shape: {np.array(first_sample).shape} "
                    f"Received shape: {np.array(sample).shape} at index "
                    f"{i}."
                )
        if data_size_warning_flag:
            # Only check the clock every 10 samples to keep iteration cheap.
            if i % 10 == 0:
                cur_time = time.time()
                # warns user if the dataset is too large to iterate within 10s
                if int(cur_time - start_time) > 10 and data_size_warning_flag:
                    warnings.warn(
                        "The dataset is taking longer than 10 seconds to "
                        "iterate over. This may be due to the size of the "
                        "dataset. Keep in mind that the `split_dataset` "
                        "utility is only for small in-memory dataset "
                        "(e.g. < 10,000 samples).",
                        category=ResourceWarning,
                        source="split_dataset",
                    )
                    # Warn at most once.
                    data_size_warning_flag = False
        yield sample
def is_tf_dataset(dataset):
    """Return whether `dataset` is (a subclass of) a `tf.data` dataset."""
    # Match on class name plus defining module so that TensorFlow does not
    # have to be imported just to perform the check.
    tf_class_names = ("DatasetV2", "Dataset")
    tf_module_markers = (
        "tensorflow.python.data",  # TF classic
        "tensorflow.data",  # newer TF paths
    )
    return _mro_matches(
        dataset,
        class_names=tf_class_names,
        module_substrings=tf_module_markers,
    )
def is_grain_dataset(dataset):
    """Return whether `dataset` is a grain `MapDataset` / `IterDataset`."""
    # Checked by name/module so grain itself need not be imported.
    grain_class_names = ("MapDataset", "IterDataset")
    return _mro_matches(
        dataset,
        class_names=grain_class_names,
        module_prefixes=("grain._src.python",),
    )
def is_torch_dataset(dataset):
    """Return whether `dataset` derives from `torch.utils.data.Dataset`."""
    return _mro_matches(
        dataset,
        class_names=("Dataset",),
        module_prefixes=("torch.utils.data",),
    )
def _mro_matches(
dataset, class_names, module_prefixes=(), module_substrings=()
):
if not hasattr(dataset, "__class__"):
return False
for parent in dataset.__class__.__mro__:
if parent.__name__ in class_names:
mod = str(parent.__module__)
if any(mod.startswith(pref) for pref in module_prefixes):
return True
if any(subs in mod for subs in module_substrings):
return True
return False
def _rescale_dataset_split_sizes(left_size, right_size, total_length):
    """Rescale the dataset split sizes.

    We want to ensure that the sum of
    the split sizes is equal to the total length of the dataset.

    Args:
        left_size: The size of the left dataset split.
        right_size: The size of the right dataset split.
        total_length: The total length of the dataset.

    Returns:
        tuple: A tuple of rescaled `left_size` and `right_size` integers.

    Raises:
        TypeError: If a split size is neither int, float, nor None.
        ValueError: If a size is out of range, both sizes are zero, or the
            sizes are inconsistent with `total_length`.
    """
    left_size_type = type(left_size)
    right_size_type = type(right_size)

    # check both left_size and right_size are integers or floats
    if (left_size is not None and left_size_type not in [int, float]) and (
        right_size is not None and right_size_type not in [int, float]
    ):
        raise TypeError(
            "Invalid `left_size` and `right_size` Types. Expected: "
            "integer or float or None, Received: type(left_size)="
            f"{left_size_type} and type(right_size)={right_size_type}"
        )

    # check left_size is a integer or float
    if left_size is not None and left_size_type not in [int, float]:
        raise TypeError(
            "Invalid `left_size` Type. Expected: int or float or None, "
            f"Received: type(left_size)={left_size_type}. "
        )

    # check right_size is a integer or float
    if right_size is not None and right_size_type not in [int, float]:
        raise TypeError(
            "Invalid `right_size` Type. "
            "Expected: int or float or None,"
            f"Received: type(right_size)={right_size_type}."
        )

    # check left_size and right_size are non-zero
    if left_size == 0 and right_size == 0:
        raise ValueError(
            "Both `left_size` and `right_size` are zero. "
            "At least one of the split sizes must be non-zero."
        )

    # check left_size is non-negative and less than 1 and less than
    # total_length
    if (
        left_size_type is int
        and (left_size <= 0 or left_size >= total_length)
        or left_size_type is float
        and (left_size <= 0 or left_size >= 1)
    ):
        raise ValueError(
            "`left_size` should be either a positive integer "
            f"smaller than {total_length}, or a float "
            "within the range `[0, 1]`. Received: left_size="
            f"{left_size}"
        )

    # check right_size is non-negative and less than 1 and less than
    # total_length
    if (
        right_size_type is int
        and (right_size <= 0 or right_size >= total_length)
        or right_size_type is float
        and (right_size <= 0 or right_size >= 1)
    ):
        raise ValueError(
            "`right_size` should be either a positive integer "
            f"and smaller than {total_length} or a float "
            "within the range `[0, 1]`. Received: right_size="
            f"{right_size}"
        )

    # check sum of left_size and right_size is less than or equal to
    # total_length
    if (
        right_size_type is left_size_type is float
        and right_size + left_size > 1
    ):
        raise ValueError(
            "The sum of `left_size` and `right_size` is greater "
            "than 1. It must be less than or equal to 1."
        )

    # Convert both sizes to sample counts. Fractions are rounded to the
    # nearest integer; integer inputs pass through unchanged in value
    # (the int -> float cast here is neutralized by the final int() cast
    # below).
    if left_size_type is float:
        left_size = round(left_size * total_length)
    elif left_size_type is int:
        left_size = float(left_size)

    if right_size_type is float:
        right_size = round(right_size * total_length)
    elif right_size_type is int:
        right_size = float(right_size)

    # A missing size defaults to the complement of the other one.
    if left_size is None:
        left_size = total_length - right_size
    elif right_size is None:
        right_size = total_length - left_size

    if left_size + right_size > total_length:
        raise ValueError(
            "The sum of `left_size` and `right_size` should "
            f"be smaller than the {total_length}. "
            f"Received: left_size + right_size = {left_size + right_size}"
            f"and total_length = {total_length}"
        )

    # Rounding/complement math may have produced an empty side; reject it.
    for split, side in [(left_size, "left"), (right_size, "right")]:
        if split == 0:
            raise ValueError(
                f"With `dataset` of length={total_length}, `left_size`="
                f"{left_size} and `right_size`={right_size}."
                f"Resulting {side} side dataset split will be empty. "
                "Adjust any of the aforementioned parameters"
            )

    left_size, right_size = int(left_size), int(right_size)
    return left_size, right_size
def _restore_dataset_from_list(
    dataset_as_list, dataset_type_spec, original_dataset
):
    """Restore the dataset from the list of arrays.

    For list/tuple, TF, and torch datasets, the flat per-sample list is
    transposed back into per-component arrays that mirror the structure of
    the original elements. Other inputs are returned unchanged.
    """
    if (
        dataset_type_spec in [tuple, list]
        or is_tf_dataset(original_dataset)
        or is_torch_dataset(original_dataset)
    ):
        # Save structure by taking the first element.
        element_spec = dataset_as_list[0]
        # Flatten each element.
        dataset_as_list = [tree.flatten(sample) for sample in dataset_as_list]
        # Combine respective elements at all indices.
        dataset_as_list = [np.array(sample) for sample in zip(*dataset_as_list)]
        # Recreate the original structure of elements.
        dataset_as_list = tree.pack_sequence_as(element_spec, dataset_as_list)
        # Turn lists to tuples as tf.data will fail on lists.
        return tree.traverse(
            lambda x: tuple(x) if isinstance(x, list) else x,
            dataset_as_list,
            top_down=False,
        )
    return dataset_as_list
def is_batched(dataset):
    """Check if the `tf.data.Dataset` is batched."""
    # Batched TF datasets carry a private `_batch_size` attribute; its mere
    # presence is used as the signal here.
    try:
        dataset._batch_size
    except AttributeError:
        return False
    return True
def get_batch_size(dataset):
    """Get the batch size of the dataset, or `None` if it is not batched."""
    # Equivalent to `dataset._batch_size if is_batched(dataset) else None`:
    # the attribute is present exactly when the dataset is batched.
    return getattr(dataset, "_batch_size", None)
def _get_type_spec(dataset):
    """Get the type spec of the dataset.

    Returns one of `tuple`, `list`, `np.ndarray`, `tf.data.Dataset`,
    `torch.utils.data.Dataset`, `grain.MapDataset`, or `None` when the
    type is not recognized. Third-party modules are imported lazily and
    only in the branch that needs them.
    """
    if isinstance(dataset, tuple):
        return tuple
    elif isinstance(dataset, list):
        return list
    elif isinstance(dataset, np.ndarray):
        return np.ndarray
    elif is_tf_dataset(dataset):
        from keras.src.utils.module_utils import tensorflow as tf

        return tf.data.Dataset
    elif is_torch_dataset(dataset):
        from torch.utils.data import Dataset as TorchDataset

        return TorchDataset
    elif is_grain_dataset(dataset):
        from grain import MapDataset

        return MapDataset
    else:
        return None
def index_directory(
    directory,
    labels,
    formats,
    class_names=None,
    shuffle=True,
    seed=None,
    follow_links=False,
    verbose=True,
):
    """List all files in `directory`, with their labels.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing files for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number
            of valid files found in the directory.
            Labels should be sorted according
            to the alphanumeric order of the image file paths
            (obtained via `os.walk(directory)` in Python).
        formats: Allowlist of file extensions to index
            (e.g. `".jpg"`, `".txt"`).
        class_names: Only valid if `labels="inferred"`. This is the explicit
            list of class names (must match names of subdirectories). Used
            to control the order of the classes
            (otherwise alphanumerical order is used).
        shuffle: Whether to shuffle the data. Defaults to `True`.
            If set to `False`, sorts the data in alphanumeric order.
        seed: Optional random seed for shuffling.
        follow_links: Whether to visits subdirectories pointed to by symlinks.
        verbose: Whether the function prints number of files found and classes.
            Defaults to `True`.

    Returns:
        tuple (file_paths, labels, class_names).
        - file_paths: list of file paths (strings).
        - labels: list of matching integer labels (same length as file_paths)
        - class_names: names of the classes corresponding to these labels, in
          order.
    """
    # Remote directories (e.g. GCS) are accessed through `tf.io.gfile`,
    # local ones through the standard library.
    if file_utils.is_remote_path(directory):
        from keras.src.utils.module_utils import tensorflow as tf

        os_module = tf.io.gfile
        path_module = tf.io.gfile
    else:
        os_module = os
        path_module = os.path

    if labels == "inferred":
        # One class per visible, non-hidden subdirectory.
        subdirs = []
        for subdir in sorted(os_module.listdir(directory)):
            if path_module.isdir(path_module.join(directory, subdir)):
                if not subdir.startswith("."):
                    if subdir.endswith("/"):
                        subdir = subdir[:-1]
                    subdirs.append(subdir)
        if class_names is not None:
            if not set(class_names).issubset(set(subdirs)):
                raise ValueError(
                    "The `class_names` passed did not match the "
                    "names of the subdirectories of the target directory. "
                    f"Expected: {subdirs} (or a subset of it), "
                    f"but received: class_names={class_names}"
                )
            subdirs = class_names  # Keep provided order.
    else:
        # In the explicit/no-label cases, index from the parent directory down.
        subdirs = [""]
        if class_names is not None:
            if labels is None:
                raise ValueError(
                    "When `labels=None` (no labels), argument `class_names` "
                    "cannot be specified."
                )
            else:
                raise ValueError(
                    "When argument `labels` is specified, argument "
                    "`class_names` cannot be specified (the `class_names` "
                    "will be the sorted list of labels)."
                )
    class_names = subdirs
    class_indices = dict(zip(class_names, range(len(class_names))))

    # Build an index of the files
    # in the different class subfolders.
    # NOTE(review): the pool is only closed/joined on the success path; an
    # exception raised below leaves it to be reclaimed by GC.
    pool = ThreadPool()
    results = []
    filenames = []
    for dirpath in (path_module.join(directory, subdir) for subdir in subdirs):
        results.append(
            pool.apply_async(
                index_subdirectory,
                (dirpath, class_indices, follow_links, formats),
            )
        )

    labels_list = []
    for res in results:
        partial_filenames, partial_labels = res.get()
        labels_list.append(partial_labels)
        filenames += partial_filenames

    if labels == "inferred":
        # Inferred labels.
        i = 0
        labels = np.zeros((len(filenames),), dtype="int32")
        for partial_labels in labels_list:
            labels[i : i + len(partial_labels)] = partial_labels
            i += len(partial_labels)
    elif labels is None:
        class_names = None
    else:
        # Manual labels.
        if len(labels) != len(filenames):
            raise ValueError(
                "Expected the lengths of `labels` to match the number "
                "of files in the target directory. len(labels) is "
                f"{len(labels)} while we found {len(filenames)} files "
                f"in directory {directory}."
            )
        class_names = [str(label) for label in sorted(set(labels))]

    if verbose:
        if labels is None:
            io_utils.print_msg(f"Found {len(filenames)} files.")
        else:
            io_utils.print_msg(
                f"Found {len(filenames)} files belonging "
                f"to {len(class_names)} classes."
            )
    pool.close()
    pool.join()
    file_paths = [path_module.join(directory, fname) for fname in filenames]

    if shuffle:
        # Shuffle globally to erase macro-structure
        if seed is None:
            seed = np.random.randint(1e6)
        # Re-seeding with the same value keeps paths and labels aligned.
        rng = np.random.RandomState(seed)
        rng.shuffle(file_paths)
        if labels is not None:
            rng = np.random.RandomState(seed)
            rng.shuffle(labels)
    return file_paths, labels, class_names
def iter_valid_files(directory, follow_links, formats):
    """Yield `(root, filename)` pairs under `directory` matching `formats`.

    Results are ordered by directory path, then filename. Extension
    matching is case-insensitive.
    """
    if file_utils.is_remote_path(directory):
        from keras.src.utils.module_utils import tensorflow as tf

        io_module = tf.io.gfile
    else:
        io_module = os
    # Symlink traversal is only supported via `os.walk`.
    if follow_links:
        walk = os.walk(directory, followlinks=follow_links)
    else:
        walk = io_module.walk(directory)
    for root, _, files in sorted(walk, key=lambda entry: entry[0]):
        for fname in sorted(files):
            if fname.lower().endswith(formats):
                yield root, fname
def index_subdirectory(directory, class_indices, follow_links, formats):
    """Recursively walks directory and list image paths and their class index.

    Args:
        directory: string, target directory.
        class_indices: dict mapping class names to their index.
        follow_links: boolean, whether to recursively follow subdirectories
            (if False, we only list top-level images in `directory`).
        formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").

    Returns:
        tuple `(filenames, labels)`. `filenames` is a list of relative file
        paths, and `labels` is a list of integer labels corresponding
        to these files.
    """
    # Remote paths join via `tf.io.gfile`, local ones via `os.path`.
    if file_utils.is_remote_path(directory):
        from keras.src.utils.module_utils import tensorflow as tf

        path_module = tf.io.gfile
    else:
        path_module = os.path
    # The directory's basename is the class name used for the label lookup.
    dirname = os.path.basename(directory)
    valid_files = iter_valid_files(directory, follow_links, formats)
    labels = []
    filenames = []
    for root, fname in valid_files:
        labels.append(class_indices[dirname])
        absolute_path = path_module.join(root, fname)
        # Store paths relative to the parent of `directory`, prefixed with
        # the class directory name.
        relative_path = path_module.join(
            dirname, os.path.relpath(absolute_path, directory)
        )
        filenames.append(relative_path)
    return filenames, labels
def get_training_or_validation_split(samples, labels, validation_split, subset):
"""Potentially restrict samples & labels to a training or validation split.
Args:
samples: List of elements.
labels: List of corresponding labels.
validation_split: Float, fraction of data to reserve for validation.
subset: Subset of the data to return.
Either `"training"`, `"validation"`, or `None`.
If `None`, we return all of the data.
Returns:
tuple (samples, labels), potentially restricted to the specified subset.
"""
if not validation_split:
return samples, labels
num_val_samples = int(validation_split * len(samples))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/numerical_utils_test.py | keras/src/utils/numerical_utils_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
# Number of classes used by the categorical-encoding tests below.
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase):
    """Tests for `to_categorical`, `normalize` and `build_pos_neg_masks`."""

    @parameterized.parameters(
        [
            ((1,), (1, NUM_CLASSES)),
            ((3,), (3, NUM_CLASSES)),
            ((4, 3), (4, 3, NUM_CLASSES)),
            ((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
            ((3, 1), (3, NUM_CLASSES)),
            ((3, 2, 1), (3, 2, NUM_CLASSES)),
        ]
    )
    def test_to_categorical(self, shape, expected_shape):
        """One-hot output has the expected shape and round-trips to labels."""
        label = np.random.randint(0, NUM_CLASSES, shape)
        one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
        # Check shape
        self.assertEqual(one_hot.shape, expected_shape)
        # Make sure there is only one 1 in a row
        self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
        # Get original labels back from one hots
        self.assertTrue(
            np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
        )

    def test_to_categorical_without_num_classes(self):
        """When omitted, the class count is inferred from the labels."""
        label = [0, 2, 5]
        one_hot = numerical_utils.to_categorical(label)
        self.assertEqual(one_hot.shape, (3, 5 + 1))

    def test_to_categorical_with_backend_tensor(self):
        """A backend tensor input yields a backend tensor output."""
        label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
        expected = backend.convert_to_tensor(
            np.array(
                [
                    [1, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 1],
                ]
            )
        )
        one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
        assert backend.is_tensor(one_hot)
        self.assertAllClose(one_hot, expected)

    @parameterized.parameters([1, 2, 3])
    def test_normalize(self, order):
        """`normalize` matches a NumPy reference for L1/L2/L3 norms."""
        xb = backend.random.uniform((3, 3), seed=1337)
        xnp = backend.convert_to_numpy(xb)
        # Expected result
        l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
        l2[l2 == 0] = 1
        expected = xnp / np.expand_dims(l2, axis=-1)

        # Test NumPy
        out = numerical_utils.normalize(xnp, axis=-1, order=order)
        self.assertIsInstance(out, np.ndarray)
        self.assertAllClose(out, expected)

        # Test backend
        out = numerical_utils.normalize(xb, axis=-1, order=order)
        self.assertTrue(backend.is_tensor(out))
        self.assertAllClose(backend.convert_to_numpy(out), expected)

    def test_build_pos_neg_masks(self):
        """Positive/negative masks are disjoint and match label equality."""
        query_labels = np.array([0, 1, 2, 2, 0])
        key_labels = np.array([0, 1, 2, 0, 2])
        expected_shape = (len(query_labels), len(key_labels))

        # Case 1: diagonal kept.
        positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
            query_labels, key_labels, remove_diagonal=False
        )
        positive_mask = backend.convert_to_numpy(positive_mask)
        negative_mask = backend.convert_to_numpy(negative_mask)
        self.assertEqual(positive_mask.shape, expected_shape)
        self.assertEqual(negative_mask.shape, expected_shape)
        # The two masks must never overlap.
        self.assertTrue(
            np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
        )

        expected_positive_mask_keep_diag = np.array(
            [
                [1, 0, 0, 1, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 1, 0, 1],
                [0, 0, 1, 0, 1],
                [1, 0, 0, 1, 0],
            ],
            dtype="bool",
        )
        self.assertTrue(
            np.all(positive_mask == expected_positive_mask_keep_diag)
        )
        self.assertTrue(
            np.all(
                negative_mask
                == np.logical_not(expected_positive_mask_keep_diag)
            )
        )

        # Case 2: diagonal removed.
        positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
            query_labels, key_labels, remove_diagonal=True
        )
        positive_mask = backend.convert_to_numpy(positive_mask)
        negative_mask = backend.convert_to_numpy(negative_mask)
        self.assertEqual(positive_mask.shape, expected_shape)
        self.assertEqual(negative_mask.shape, expected_shape)
        self.assertTrue(
            np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
        )

        expected_positive_mask_with_remove_diag = np.array(
            [
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 1],
                [0, 0, 1, 0, 1],
                [1, 0, 0, 1, 0],
            ],
            dtype="bool",
        )
        self.assertTrue(
            np.all(positive_mask == expected_positive_mask_with_remove_diag)
        )

        # Case 3: query and key label lists of different lengths.
        query_labels = np.array([1, 2, 3])
        key_labels = np.array([1, 2, 3, 1])

        positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
            query_labels, key_labels, remove_diagonal=True
        )
        positive_mask = backend.convert_to_numpy(positive_mask)
        negative_mask = backend.convert_to_numpy(negative_mask)

        expected_shape_diff_sizes = (len(query_labels), len(key_labels))
        self.assertEqual(positive_mask.shape, expected_shape_diff_sizes)
        self.assertEqual(negative_mask.shape, expected_shape_diff_sizes)
        self.assertTrue(
            np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/grain_utils.py | keras/src/utils/grain_utils.py | from keras.src import backend
from keras.src import tree
def make_batch(values):
    """Stack a list of structures into a single batched structure."""
    from keras.src import ops

    if not values:
        raise ValueError("Cannot batch 0 values. Please file a bug.")

    def stack_leaves(*leaves):
        # One corresponding leaf per input structure, stacked along a new
        # leading batch axis.
        return ops.stack(leaves)

    # Build the batch on CPU.
    with backend.device_scope("cpu"):
        return tree.map_structure(stack_leaves, *values)
def make_string_batch(values):
    """Stack structures that may contain Python strings into a batch."""
    from keras.src import ops

    if not values:
        raise ValueError("Cannot batch 0 values. Please file a bug.")

    def stack_leaves(*leaves):
        if not isinstance(leaves[0], str):
            return ops.stack(leaves)
        # String leaves: only TF has a native string tensor type; other
        # backends keep the leaves as plain Python strings.
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            leaves = [
                tf.convert_to_tensor(x, dtype=tf.string) for x in leaves
            ]
            leaves = tf.stack(leaves)
        return leaves

    # Build the batch on CPU.
    with backend.device_scope("cpu"):
        return tree.map_structure(stack_leaves, *values)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/rng_utils_test.py | keras/src/utils/rng_utils_test.py | import numpy as np
import keras
from keras.src import backend
from keras.src.testing import test_case
from keras.src.utils import rng_utils
class TestRandomSeedSetting(test_case.TestCase):
    """Tests that `rng_utils.set_random_seed` makes RNG use reproducible."""

    def test_set_random_seed_with_seed_generator(self):
        """Re-seeding reproduces randomly-initialized model outputs."""

        def get_model_output():
            # Each call builds a freshly (randomly) initialized model, so
            # the prediction depends on the global RNG state at call time.
            model = keras.Sequential(
                [
                    keras.layers.Dense(10),
                    keras.layers.Dropout(0.5),
                    keras.layers.Dense(10),
                ]
            )
            x = np.random.random((32, 10)).astype("float32")
            return model.predict(x, batch_size=16)

        rng_utils.set_random_seed(42)
        y1 = get_model_output()
        # Second call should produce different results.
        y2 = get_model_output()
        self.assertNotAllClose(y1, y2)
        # Re-seeding should produce the same results as the first time.
        rng_utils.set_random_seed(42)
        y3 = get_model_output()
        self.assertAllClose(y1, y3)
        # Re-seeding with a different seed should produce different results.
        rng_utils.set_random_seed(1337)
        y4 = get_model_output()
        self.assertNotAllClose(y1, y4)

    def test_set_random_seed_with_global_seed_generator(self):
        """Re-seeding reproduces draws from the backend's global RNG."""
        rng_utils.set_random_seed(42)
        y1 = backend.random.randint((32, 10), minval=0, maxval=1000)
        # Second call should produce different results.
        y2 = backend.random.randint((32, 10), minval=0, maxval=1000)
        self.assertNotAllClose(y1, y2)
        # Re-seeding should produce the same results as the first time.
        rng_utils.set_random_seed(42)
        y3 = backend.random.randint((32, 10), minval=0, maxval=1000)
        self.assertAllClose(y1, y3)
        # Re-seeding with a different seed should produce different results.
        rng_utils.set_random_seed(1337)
        y4 = backend.random.randint((32, 10), minval=0, maxval=1000)
        self.assertNotAllClose(y1, y4)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/audio_dataset_utils.py | keras/src/utils/audio_dataset_utils.py | import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils import dataset_utils
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.module_utils import tensorflow_io as tfio
ALLOWED_FORMATS = (".wav",)
@keras_export("keras.utils.audio_dataset_from_directory")
def audio_dataset_from_directory(
    directory,
    labels="inferred",
    label_mode="int",
    class_names=None,
    batch_size=32,
    sampling_rate=None,
    output_sequence_length=None,
    ragged=False,
    shuffle=True,
    seed=None,
    validation_split=None,
    subset=None,
    follow_links=False,
    verbose=True,
):
    """Generates a `tf.data.Dataset` from audio files in a directory.

    If your directory structure is:

    ```
    main_directory/
    ...class_a/
    ......a_audio_1.wav
    ......a_audio_2.wav
    ...class_b/
    ......b_audio_1.wav
    ......b_audio_2.wav
    ```

    Then calling `audio_dataset_from_directory(main_directory,
    labels='inferred')`
    will return a `tf.data.Dataset` that yields batches of audio files from
    the subdirectories `class_a` and `class_b`, together with labels
    0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

    Only `.wav` files are supported at this time.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain subdirectories,
            each containing audio files for a class. Otherwise, the directory
            structure is ignored.
        labels: Either "inferred" (labels are generated from the directory
            structure), `None` (no labels), or a list/tuple of integer labels
            of the same size as the number of audio files found in
            the directory. Labels should be sorted according to the
            alphanumeric order of the audio file paths
            (obtained via `os.walk(directory)` in Python).
        label_mode: String describing the encoding of `labels`. Options are:
            - `"int"`: means that the labels are encoded as integers (e.g. for
                `sparse_categorical_crossentropy` loss).
            - `"categorical"` means that the labels are encoded as a categorical
                vector (e.g. for `categorical_crossentropy` loss)
            - `"binary"` means that the labels (there can be only 2)
                are encoded as `float32` scalars with values 0
                or 1 (e.g. for `binary_crossentropy`).
            - `None` (no labels).
        class_names: Only valid if "labels" is `"inferred"`.
            This is the explicit list of class names
            (must match names of subdirectories). Used to control the order
            of the classes (otherwise alphanumerical order is used).
        batch_size: Size of the batches of data. Default: 32. If `None`,
            the data will not be batched
            (the dataset will yield individual samples).
        sampling_rate: Audio sampling rate (in samples per second).
        output_sequence_length: Maximum length of an audio sequence. Audio files
            longer than this will be truncated to `output_sequence_length`.
            If set to `None`, then all sequences in the same batch will
            be padded to the
            length of the longest sequence in the batch.
        ragged: Whether to return a Ragged dataset (where each sequence has its
            own length). Defaults to `False`.
        shuffle: Whether to shuffle the data.
            If set to `False`, sorts the data in alphanumeric order.
            Defaults to `True`.
        seed: Optional random seed for shuffling and transformations.
        validation_split: Optional float between 0 and 1, fraction of data to
            reserve for validation.
        subset: Subset of the data to return. One of `"training"`,
            `"validation"` or `"both"`. Only used if `validation_split` is set.
        follow_links: Whether to visits subdirectories pointed to by symlinks.
            Defaults to `False`.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.

    Returns:

    A `tf.data.Dataset` object.

    - If `label_mode` is `None`, it yields `string` tensors of shape
      `(batch_size,)`, containing the contents of a batch of audio files.
    - Otherwise, it yields a tuple `(audio, labels)`, where `audio`
      has shape `(batch_size, sequence_length, num_channels)` and `labels`
      follows the format described
      below.

    Rules regarding labels format:

    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
      `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
      1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
      of shape `(batch_size, num_classes)`, representing a one-hot
      encoding of the class index.
    """
    # Validate the `labels` / `class_names` combination first: explicit
    # labels are mutually exclusive with inferred `class_names`.
    if labels not in ("inferred", None):
        if not isinstance(labels, (list, tuple)):
            raise ValueError(
                "The `labels` argument should be a list/tuple of integer "
                "labels, of the same size as the number of audio files in "
                "the target directory. If you wish to infer the labels from "
                "the subdirectory names in the target directory,"
                ' pass `labels="inferred"`. '
                "If you wish to get a dataset that only contains audio samples "
                f"(no labels), pass `labels=None`. Received: labels={labels}"
            )
        if class_names:
            raise ValueError(
                "You can only pass `class_names` if "
                f'`labels="inferred"`. Received: labels={labels}, and '
                f"class_names={class_names}"
            )
    if label_mode not in {"int", "categorical", "binary", None}:
        raise ValueError(
            '`label_mode` argument must be one of "int", "categorical", '
            '"binary", '
            f"or None. Received: label_mode={label_mode}"
        )
    # Ragged output keeps per-clip lengths, so a fixed truncation length
    # would be contradictory.
    if ragged and output_sequence_length is not None:
        raise ValueError(
            "Cannot set both `ragged` and `output_sequence_length`"
        )

    if sampling_rate is not None:
        if not isinstance(sampling_rate, int):
            raise ValueError(
                "`sampling_rate` should have an integer value. "
                f"Received: sampling_rate={sampling_rate}"
            )

        if sampling_rate <= 0:
            raise ValueError(
                "`sampling_rate` should be higher than 0. "
                f"Received: sampling_rate={sampling_rate}"
            )

        # Resampling is delegated to tensorflow_io, which is an optional
        # dependency; fail early with an actionable message.
        if not tfio.available:
            raise ImportError(
                "To use the argument `sampling_rate`, you should install "
                "tensorflow_io. You can install it via `pip install "
                "tensorflow-io`."
            )

    # `labels=None` and `label_mode=None` imply one another.
    if labels is None or label_mode is None:
        labels = None
        label_mode = None

    dataset_utils.check_validation_split_arg(
        validation_split, subset, shuffle, seed
    )

    # Draw a concrete seed up front so the same shuffle order is used in
    # `index_directory` and in the dataset-level shuffles below.
    if seed is None:
        seed = np.random.randint(1e6)

    # Scale the shuffle buffer with the batch size when batching.
    if batch_size is not None:
        shuffle_buffer_size = batch_size * 8
    else:
        shuffle_buffer_size = 1024

    file_paths, labels, class_names = dataset_utils.index_directory(
        directory,
        labels,
        formats=ALLOWED_FORMATS,
        class_names=class_names,
        shuffle=shuffle,
        seed=seed,
        follow_links=follow_links,
        verbose=verbose,
    )

    if label_mode == "binary" and len(class_names) != 2:
        raise ValueError(
            'When passing `label_mode="binary"`, there must be exactly 2 '
            f"class_names. Received: class_names={class_names}"
        )

    # `subset="both"` returns a (train, val) tuple of prepared datasets;
    # any other subset returns a single dataset.
    if subset == "both":
        train_dataset, val_dataset = get_training_and_validation_dataset(
            file_paths=file_paths,
            labels=labels,
            validation_split=validation_split,
            directory=directory,
            label_mode=label_mode,
            class_names=class_names,
            sampling_rate=sampling_rate,
            output_sequence_length=output_sequence_length,
            ragged=ragged,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        train_dataset = prepare_dataset(
            dataset=train_dataset,
            batch_size=batch_size,
            class_names=class_names,
            output_sequence_length=output_sequence_length,
            ragged=ragged,
        )
        val_dataset = prepare_dataset(
            dataset=val_dataset,
            batch_size=batch_size,
            class_names=class_names,
            output_sequence_length=output_sequence_length,
            ragged=ragged,
        )
        return train_dataset, val_dataset
    else:
        dataset = get_dataset(
            file_paths=file_paths,
            labels=labels,
            directory=directory,
            validation_split=validation_split,
            subset=subset,
            label_mode=label_mode,
            class_names=class_names,
            sampling_rate=sampling_rate,
            output_sequence_length=output_sequence_length,
            ragged=ragged,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        dataset = prepare_dataset(
            dataset=dataset,
            batch_size=batch_size,
            class_names=class_names,
            output_sequence_length=output_sequence_length,
            ragged=ragged,
        )
        return dataset
def prepare_dataset(
    dataset,
    batch_size,
    class_names,
    output_sequence_length,
    ragged,
):
    """Apply prefetching and optional batching, then attach class names."""
    out = dataset.prefetch(tf.data.AUTOTUNE)
    if batch_size is not None:
        needs_padding = output_sequence_length is None and not ragged
        if needs_padding:
            # Variable-length dense clips must be padded to a common length
            # per batch; the shapes assume (audio, label) elements.
            out = out.padded_batch(
                batch_size, padded_shapes=([None, None], [])
            )
        else:
            out = out.batch(batch_size)
    # Users may need to reference `class_names`.
    out.class_names = class_names
    return out
def get_training_and_validation_dataset(
    file_paths,
    labels,
    validation_split,
    directory,
    label_mode,
    class_names,
    sampling_rate,
    output_sequence_length,
    ragged,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Build the (train, val) dataset pair used for `subset="both"`."""
    train_paths, train_labels = dataset_utils.get_training_or_validation_split(
        file_paths, labels, validation_split, "training"
    )
    if not train_paths:
        raise ValueError(
            f"No training audio files found in directory {directory}. "
            f"Allowed format(s): {ALLOWED_FORMATS}"
        )

    val_paths, val_labels = dataset_utils.get_training_or_validation_split(
        file_paths, labels, validation_split, "validation"
    )
    if not val_paths:
        raise ValueError(
            f"No validation audio files found in directory {directory}. "
            f"Allowed format(s): {ALLOWED_FORMATS}"
        )

    num_classes = len(class_names) if class_names else 0
    train_dataset = paths_and_labels_to_dataset(
        file_paths=train_paths,
        labels=train_labels,
        label_mode=label_mode,
        num_classes=num_classes,
        sampling_rate=sampling_rate,
        output_sequence_length=output_sequence_length,
        ragged=ragged,
        shuffle=shuffle,
        shuffle_buffer_size=shuffle_buffer_size,
        seed=seed,
    )
    # The validation split is never shuffled.
    val_dataset = paths_and_labels_to_dataset(
        file_paths=val_paths,
        labels=val_labels,
        label_mode=label_mode,
        num_classes=num_classes,
        sampling_rate=sampling_rate,
        output_sequence_length=output_sequence_length,
        ragged=ragged,
        shuffle=False,
    )
    return train_dataset, val_dataset
def get_dataset(
    file_paths,
    labels,
    directory,
    validation_split,
    subset,
    label_mode,
    class_names,
    sampling_rate,
    output_sequence_length,
    ragged,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Build a single dataset for the requested `subset` (or the full data)."""
    split_paths, split_labels = dataset_utils.get_training_or_validation_split(
        file_paths, labels, validation_split, subset
    )
    if not split_paths:
        raise ValueError(
            f"No audio files found in directory {directory}. "
            f"Allowed format(s): {ALLOWED_FORMATS}"
        )
    return paths_and_labels_to_dataset(
        file_paths=split_paths,
        labels=split_labels,
        label_mode=label_mode,
        num_classes=len(class_names) if class_names else 0,
        sampling_rate=sampling_rate,
        output_sequence_length=output_sequence_length,
        ragged=ragged,
        shuffle=shuffle,
        shuffle_buffer_size=shuffle_buffer_size,
        seed=seed,
    )
def read_and_decode_audio(
    path, sampling_rate=None, output_sequence_length=None
):
    """Read a WAV file, decode it, and optionally resample it."""
    raw = tf.io.read_file(path)

    # `decode_wav` treats `desired_samples=-1` as "decode the whole clip".
    desired_samples = (
        -1 if output_sequence_length is None else output_sequence_length
    )
    audio, file_rate = tf.audio.decode_wav(
        contents=raw, desired_samples=desired_samples
    )
    if sampling_rate is not None:
        # tfio's resample expects an int64 input rate.
        file_rate = tf.cast(file_rate, tf.int64)
        audio = tfio.audio.resample(
            input=audio, rate_in=file_rate, rate_out=sampling_rate
        )
    return audio
def paths_and_labels_to_dataset(
    file_paths,
    labels,
    label_mode,
    num_classes,
    sampling_rate,
    output_sequence_length,
    ragged,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a fixed-size dataset of audio and labels.

    Yields decoded audio tensors (optionally paired with labels when
    `label_mode` is set), converted to ragged tensors when `ragged=True`.
    """
    path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
    if label_mode:
        label_ds = dataset_utils.labels_to_dataset_tf(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((path_ds, label_ds))
    else:
        ds = path_ds

    if shuffle:
        # Shuffle paths (and labels) before the expensive decode step.
        ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed)

    if label_mode:
        ds = ds.map(
            lambda x, y: (
                read_and_decode_audio(x, sampling_rate, output_sequence_length),
                y,
            ),
            num_parallel_calls=tf.data.AUTOTUNE,
        )

        if ragged:
            # Ragged conversion lets each clip keep its own length through
            # batching.
            ds = ds.map(
                lambda x, y: (tf.RaggedTensor.from_tensor(x), y),
                num_parallel_calls=tf.data.AUTOTUNE,
            )
    else:
        ds = ds.map(
            lambda x: read_and_decode_audio(
                x, sampling_rate, output_sequence_length
            ),
            num_parallel_calls=tf.data.AUTOTUNE,
        )

        if ragged:
            ds = ds.map(
                lambda x: tf.RaggedTensor.from_tensor(x),
                num_parallel_calls=tf.data.AUTOTUNE,
            )
    return ds
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/progbar_test.py | keras/src/utils/progbar_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.utils import progbar
class ProgbarTest(testing.TestCase):
    """Smoke tests for `Progbar.update` with various metric value types."""

    @parameterized.named_parameters(
        [
            ("float", "float"),
            ("np", "np"),
            ("list", "list"),
        ]
    )
    def test_update(self, value_type):
        # Map each parameterized case to a concrete metric value.
        values_by_type = {
            "float": lambda: 1.0,
            "np": lambda: np.array(1.0),
            "list": lambda: [0.0, 1.0, 2.0],
        }
        if value_type not in values_by_type:
            raise ValueError("Unknown value_type")
        values = values_by_type[value_type]()
        bar = progbar.Progbar(target=1, verbose=1)
        # Should render a single finalized step without raising.
        bar.update(1, values=[("values", values)], finalize=True)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/numerical_utils.py | keras/src/utils/numerical_utils.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.utils import tf_utils
@keras_export("keras.utils.normalize")
def normalize(x, axis=-1, order=2):
    """Normalizes an array.

    If the input is a NumPy array, a NumPy array will be returned.
    If it's a backend tensor, a backend tensor will be returned.

    Args:
        x: Array to normalize.
        axis: axis along which to normalize, or `None` to normalize over
            the flattened array.
        order: Normalization order (e.g. `order=2` for L2 norm).

    Returns:
        A normalized copy of the array.
    """
    if isinstance(x, np.ndarray):
        # NumPy input
        norm = np.atleast_1d(np.linalg.norm(x, order, axis))
        # Avoid division by zero for all-zero slices.
        norm[norm == 0] = 1

        # `np.expand_dims` needs a concrete axis. Only `None` should map to
        # -1; writing `axis = axis or -1` here would incorrectly remap
        # `axis=0` to -1 and divide along the wrong axis.
        if axis is None:
            axis = -1
        return x / np.expand_dims(norm, axis)

    # Backend tensor input. Import lazily so the NumPy path has no backend
    # dependency.
    from keras.src import ops

    return ops.nn.normalize(x, axis=axis, order=order)
@keras_export("keras.utils.to_categorical")
def to_categorical(x, num_classes=None):
    """Converts a class vector (integers) to binary class matrix.

    E.g. for use with `categorical_crossentropy`.

    Args:
        x: Array-like with class values to be converted into a matrix
            (integers from 0 to `num_classes - 1`).
        num_classes: Total number of classes. If `None`, this would be inferred
            as `max(x) + 1`. Defaults to `None`.

    Returns:
        A binary matrix representation of the input as a NumPy array. The class
        axis is placed last.

    Example:

    >>> a = keras.utils.to_categorical([0, 1, 2, 3], num_classes=4)
    >>> print(a)
    [[1. 0. 0. 0.]
     [0. 1. 0. 0.]
     [0. 0. 1. 0.]
     [0. 0. 0. 1.]]
    """
    if backend.is_tensor(x):
        input_shape = backend.core.shape(x)
        # A trailing singleton dimension is squeezed before one-hot encoding
        # so the class axis cleanly replaces it.
        if (
            input_shape is not None
            and len(input_shape) > 1
            and input_shape[-1] == 1
        ):
            x = backend.numpy.reshape(x, tuple(input_shape[:-1]))
        return backend.nn.one_hot(x, num_classes)

    # NumPy / array-like path.
    x = np.array(x, dtype="int64")
    input_shape = x.shape
    # Drop a trailing singleton dimension from the output shape as well.
    if input_shape and len(input_shape) > 1 and input_shape[-1] == 1:
        input_shape = tuple(input_shape[:-1])
    flat = x.reshape(-1)
    if not num_classes:
        num_classes = np.max(flat) + 1
    one_hot = np.zeros((flat.shape[0], num_classes))
    one_hot[np.arange(flat.shape[0]), flat] = 1
    return np.reshape(one_hot, input_shape + (num_classes,))
def encode_categorical_inputs(
    inputs,
    output_mode,
    depth,
    dtype,
    sparse=False,
    count_weights=None,
    backend_module=None,
):
    """Encodes categorical inputs according to output_mode.

    Args:
        inputs: the inputs to encode.
        output_mode: one of `"int"`, `"one_hot"`, `"multi_hot"`, or `"count"`.
        depth: number of classes, this will be the last dimension of the output.
        dtype: the dtype of the output, unless `count_weights` is not `None`.
        sparse: whether the output should be sparse for backends supporting it.
        count_weights: weights to apply if `output_mode` is `"count"`.
        backend_module: the backend to use instead of the current one.

    Returns: the encoded inputs.
    """
    backend_module = backend_module or backend

    if output_mode == "int":
        # "int" mode passes values through unchanged; only the dtype is cast.
        return backend_module.cast(inputs, dtype=dtype)

    rank_of_inputs = len(backend_module.shape(inputs))

    # In all cases, we should uprank scalar input to a single sample.
    if rank_of_inputs == 0:
        inputs = backend_module.numpy.expand_dims(inputs, -1)
        rank_of_inputs = 1

    if (
        backend_module.__name__.endswith("tensorflow")
        and rank_of_inputs <= 2
        and output_mode in ("multi_hot", "count")
    ):
        # TF only fastpath. Uses bincount; faster. Doesn't work for rank 3+.
        try:
            return tf_utils.tf_encode_categorical_inputs(
                inputs,
                output_mode,
                depth,
                dtype=dtype,
                sparse=sparse,
                count_weights=count_weights,
            )
        except ValueError:
            # Fall back to the backend-agnostic implementation below.
            pass

    if output_mode == "multi_hot":
        return backend_module.nn.multi_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
    elif output_mode == "one_hot":
        input_shape = backend_module.core.shape(inputs)
        # Shrink the last dimension if the shape is (..., 1).
        if (
            input_shape is not None
            and len(input_shape) > 1
            and input_shape[-1] == 1
        ):
            newshape = tuple(input_shape[:-1])
            inputs = backend_module.numpy.reshape(inputs, newshape)
        return backend_module.nn.one_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
    elif output_mode == "count":
        # We don't use `ops.bincount` because its output has a dynamic shape
        # (last dimension is the highest value of `inputs`). We implement a
        # narrower use case where `minlength` and `maxlength` (not supported by
        # `ops.bincount`) are the same and static value: `depth`. We also don't
        # need to support indices that are negative or greater than `depth`.
        reduction_axis = 1 if len(inputs.shape) > 1 else 0
        if count_weights is not None:
            # The weighted count inherits the weights' dtype.
            dtype = count_weights.dtype
        one_hot_encoding = backend_module.nn.one_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
        if count_weights is not None:
            count_weights = backend_module.numpy.expand_dims(count_weights, -1)
            one_hot_encoding = one_hot_encoding * count_weights

        outputs = backend_module.numpy.sum(
            one_hot_encoding,
            axis=reduction_axis,
        )
        return outputs
def build_pos_neg_masks(
    query_labels,
    key_labels,
    remove_diagonal=True,
):
    """Build boolean (positive, negative) pair masks from label vectors.

    Entry (i, j) of the positive mask is True when query i and key j share
    a label; the negative mask is the complement of that raw match matrix.
    With `remove_diagonal=True`, self-pairs (i == j) are removed from the
    positive mask only (they stay False in both masks).
    """
    from keras.src import ops

    def _as_column(labels):
        # Promote 1-D label vectors to column shape for broadcasting.
        if ops.ndim(labels) == 1:
            return ops.reshape(labels, (-1, 1))
        return labels

    query_labels = _as_column(query_labels)
    key_labels = _as_column(key_labels)

    positive_mask = ops.equal(query_labels, ops.transpose(key_labels))
    negative_mask = ops.logical_not(positive_mask)

    if remove_diagonal:
        identity = ops.eye(
            ops.size(query_labels),
            ops.size(key_labels),
            k=0,
            dtype="bool",
        )
        positive_mask = ops.logical_and(positive_mask, ~identity)

    return positive_mask, negative_mask
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/timeseries_dataset_utils.py | keras/src/utils/timeseries_dataset_utils.py | import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.module_utils import tensorflow as tf
@keras_export(
    [
        "keras.utils.timeseries_dataset_from_array",
        "keras.preprocessing.timeseries_dataset_from_array",
    ]
)
def timeseries_dataset_from_array(
    data,
    targets,
    sequence_length,
    sequence_stride=1,
    sampling_rate=1,
    batch_size=128,
    shuffle=False,
    seed=None,
    start_index=None,
    end_index=None,
):
    """Creates a dataset of sliding windows over a timeseries provided as array.

    This function takes in a sequence of data-points gathered at
    equal intervals, along with time series parameters such as
    length of the sequences/windows, spacing between two sequence/windows, etc.,
    to produce batches of timeseries inputs and targets.

    Args:
        data: Numpy array or eager tensor
            containing consecutive data points (timesteps).
            Axis 0 is expected to be the time dimension.
        targets: Targets corresponding to timesteps in `data`.
            `targets[i]` should be the target
            corresponding to the window that starts at index `i`
            (see example 2 below).
            Pass `None` if you don't have target data (in this case the dataset
            will only yield the input data).
        sequence_length: Length of the output sequences
            (in number of timesteps).
        sequence_stride: Period between successive output sequences.
            For stride `s`, output samples would
            start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
        sampling_rate: Period between successive individual timesteps
            within sequences. For rate `r`, timesteps
            `data[i], data[i + r], ... data[i + sequence_length]`
            are used for creating a sample sequence.
        batch_size: Number of timeseries samples in each batch
            (except maybe the last one). If `None`, the data will not be batched
            (the dataset will yield individual samples).
        shuffle: Whether to shuffle output samples,
            or instead draw them in chronological order.
        seed: Optional int; random seed for shuffling.
        start_index: Optional int; data points earlier (exclusive)
            than `start_index` will not be used
            in the output sequences. This is useful to reserve part of the
            data for test or validation.
        end_index: Optional int; data points later (exclusive) than `end_index`
            will not be used in the output sequences.
            This is useful to reserve part of the data for test or validation.

    Returns:

    A `tf.data.Dataset` instance. If `targets` was passed, the dataset yields
    tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
    only `batch_of_sequences`.

    Example 1:

    Consider indices `[0, 1, ... 98]`.
    With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
    `shuffle=False`, the dataset will yield batches of sequences
    composed of the following indices:

    ```
    First sequence:  [0  2  4  6  8 10 12 14 16 18]
    Second sequence: [3  5  7  9 11 13 15 17 19 21]
    Third sequence:  [6  8 10 12 14 16 18 20 22 24]
    ...
    Last sequence:   [78 80 82 84 86 88 90 92 94 96]
    ```

    In this case the last 2 data points are discarded since no full sequence
    can be generated to include them (the next sequence would have started
    at index 81, and thus its last step would have gone over 98).

    Example 2: Temporal regression.

    Consider an array `data` of scalar values, of shape `(steps,)`.
    To generate a dataset that uses the past 10
    timesteps to predict the next timestep, you would use:

    ```python
    input_data = data[:-10]
    targets = data[10:]
    dataset = timeseries_dataset_from_array(
        input_data, targets, sequence_length=10)
    for batch in dataset:
      inputs, targets = batch
      assert np.array_equal(inputs[0], data[:10])  # First sequence: steps [0-9]
      # Corresponding target: step 10
      assert np.array_equal(targets[0], data[10])
      break
    ```

    Example 3: Temporal regression for many-to-many architectures.

    Consider two arrays of scalar values `X` and `Y`,
    both of shape `(100,)`. The resulting dataset should consist samples with
    20 timestamps each. The samples should not overlap.
    To generate a dataset that uses the current timestamp
    to predict the corresponding target timestep, you would use:

    ```python
    X = np.arange(100)
    Y = X*2

    sample_length = 20
    input_dataset = timeseries_dataset_from_array(
        X, None, sequence_length=sample_length, sequence_stride=sample_length)
    target_dataset = timeseries_dataset_from_array(
        Y, None, sequence_length=sample_length, sequence_stride=sample_length)

    for batch in zip(input_dataset, target_dataset):
      inputs, targets = batch
      assert np.array_equal(inputs[0], X[:sample_length])

      # second sample equals output timestamps 20-40
      assert np.array_equal(targets[1], Y[sample_length:2*sample_length])
      break
    ```
    """
    # NOTE(review): these checks use truthiness, so `start_index=0` or
    # `end_index=0` skip validation (0 is the effective default for
    # `start_index` anyway).
    if start_index:
        if start_index < 0:
            raise ValueError(
                "`start_index` must be 0 or greater. Received: "
                f"start_index={start_index}"
            )
        if start_index >= len(data):
            raise ValueError(
                "`start_index` must be lower than the length of the "
                f"data. Received: start_index={start_index}, for data "
                f"of length {len(data)}"
            )
    if end_index:
        if start_index and end_index <= start_index:
            raise ValueError(
                "`end_index` must be higher than `start_index`. "
                f"Received: start_index={start_index}, and "
                f"end_index={end_index} "
            )
        if end_index >= len(data):
            raise ValueError(
                "`end_index` must be lower than the length of the "
                f"data. Received: end_index={end_index}, for data of "
                f"length {len(data)}"
            )
        if end_index <= 0:
            raise ValueError(
                "`end_index` must be higher than 0. "
                f"Received: end_index={end_index}"
            )

    # Validate strides
    if sampling_rate <= 0:
        raise ValueError(
            "`sampling_rate` must be higher than 0. Received: "
            f"sampling_rate={sampling_rate}"
        )
    if sampling_rate >= len(data):
        raise ValueError(
            "`sampling_rate` must be lower than the length of the "
            f"data. Received: sampling_rate={sampling_rate}, for data "
            f"of length {len(data)}"
        )
    if sequence_stride <= 0:
        raise ValueError(
            "`sequence_stride` must be higher than 0. Received: "
            f"sequence_stride={sequence_stride}"
        )
    if sequence_stride >= len(data):
        raise ValueError(
            "`sequence_stride` must be lower than the length of the "
            f"data. Received: sequence_stride={sequence_stride}, for "
            f"data of length {len(data)}"
        )

    if start_index is None:
        start_index = 0
    if end_index is None:
        end_index = len(data)

    # Determine the lowest dtype to store start positions (to lower memory
    # usage).
    num_seqs = end_index - start_index - (sequence_length - 1) * sampling_rate
    if targets is not None:
        num_seqs = min(num_seqs, len(targets))
    if num_seqs < 2147483647:
        index_dtype = "int32"
    else:
        index_dtype = "int64"

    # Generate start positions
    start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
    if shuffle:
        if seed is None:
            seed = np.random.randint(1e6)
        rng = np.random.RandomState(seed)
        rng.shuffle(start_positions)

    # Cast so the `tf.range` arithmetic below stays in the chosen index dtype.
    sequence_length = tf.cast(sequence_length, dtype=index_dtype)
    sampling_rate = tf.cast(sampling_rate, dtype=index_dtype)

    # `positions_ds` re-emits the full start-position vector for every
    # element; zipping it with a counter lets each map call pick entry `i`.
    positions_ds = tf.data.Dataset.from_tensors(start_positions).repeat()

    # For each initial window position, generates indices of the window elements
    indices = tf.data.Dataset.zip(
        (tf.data.Dataset.range(len(start_positions)), positions_ds)
    ).map(
        lambda i, positions: tf.range(
            positions[i],
            positions[i] + sequence_length * sampling_rate,
            sampling_rate,
        ),
        num_parallel_calls=tf.data.AUTOTUNE,
    )

    dataset = sequences_from_indices(data, indices, start_index, end_index)
    if targets is not None:
        indices = tf.data.Dataset.zip(
            (tf.data.Dataset.range(len(start_positions)), positions_ds)
        ).map(
            lambda i, positions: positions[i],
            num_parallel_calls=tf.data.AUTOTUNE,
        )
        target_ds = sequences_from_indices(
            targets, indices, start_index, end_index
        )
        dataset = tf.data.Dataset.zip((dataset, target_ds))
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    if batch_size is not None:
        if shuffle:
            # Shuffle locally at each iteration
            dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
        dataset = dataset.batch(batch_size)
    else:
        if shuffle:
            dataset = dataset.shuffle(buffer_size=1024, seed=seed)
    return dataset
def sequences_from_indices(array, indices_ds, start_index, end_index):
    """Gather windows of `array[start_index:end_index]` at the given indices.

    The sliced array is embedded once and repeated, so each element of
    `indices_ds` selects its timesteps with a single gather.
    """
    window = array[start_index:end_index]
    data_ds = tf.data.Dataset.from_tensors(window).repeat()

    def _gather(steps, inds):
        return tf.gather(steps, inds)

    return tf.data.Dataset.zip((data_ds, indices_ds)).map(
        _gather, num_parallel_calls=tf.data.AUTOTUNE
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/traceback_utils.py | keras/src/utils/traceback_utils.py | import inspect
import os
import traceback
import types
from functools import wraps
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
# Path fragments marking traceback frames to hide when filtering is on:
# the Keras package directory itself, and TensorFlow's internals.
_EXCLUDED_PATHS = (
    os.path.abspath(os.path.join(__file__, "..", "..")),
    os.path.join("tensorflow", "python"),
)
@keras_export("keras.config.enable_traceback_filtering")
def enable_traceback_filtering():
    """Turn on traceback filtering.

    By default, Keras hides its own internal frames from the tracebacks
    (stack traces) of exceptions it raises, keeping them short, readable,
    and focused on your own code, which is what is actionable for you.

    If you previously turned filtering off via
    `keras.config.disable_traceback_filtering()`, calling this function
    turns it back on.

    See also `keras.config.disable_traceback_filtering()` and
    `keras.config.is_traceback_filtering_enabled()`.
    """
    global_state.set_global_attribute("traceback_filtering", True)
@keras_export("keras.config.disable_traceback_filtering")
def disable_traceback_filtering():
    """Turn off traceback filtering.

    By default, Keras hides its own internal frames from the tracebacks
    (stack traces) of exceptions it raises, keeping them short, readable,
    and focused on your own code. Call this function to see the full, raw
    tracebacks instead.

    Filtering can be turned back on with
    `keras.config.enable_traceback_filtering()`.

    See also `keras.config.is_traceback_filtering_enabled()`.
    """
    global_state.set_global_attribute("traceback_filtering", False)
@keras_export("keras.config.is_traceback_filtering_enabled")
def is_traceback_filtering_enabled():
    """Check if traceback filtering is enabled.

    By default, Keras hides its own internal frames from the tracebacks
    (stack traces) of exceptions it raises, keeping them short, readable,
    and focused on your own code. Filtering can be toggled with
    `keras.config.enable_traceback_filtering()` and
    `keras.config.disable_traceback_filtering()`.

    Returns:
        Boolean, `True` if traceback filtering is enabled,
        and `False` otherwise.
    """
    # Filtering defaults to on when the attribute has never been set.
    return global_state.get_global_attribute("traceback_filtering", True)
def include_frame(fname):
    """Return whether a traceback frame from file `fname` should be kept."""
    return not any(exclusion in fname for exclusion in _EXCLUDED_PATHS)
def _process_traceback_frames(tb):
    """Iterate through traceback frames and return a new, filtered traceback."""
    last_tb = None
    tb_list = list(traceback.walk_tb(tb))
    # Rebuild the traceback chain in reverse: each kept frame is linked in
    # front of the previously built (deeper) chain, so `last_tb` ends up as
    # the head of the filtered traceback.
    for f, line_no in reversed(tb_list):
        if include_frame(f.f_code.co_filename):
            last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
    if last_tb is None and tb_list:
        # If no frames were kept during filtering, fall back to the final
        # frame yielded by `walk_tb` so the exception still carries a
        # traceback.
        f, line_no = tb_list[-1]
        last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
    return last_tb
def filter_traceback(fn):
    """Filter out Keras-internal traceback frames in exceptions raised by fn."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Bypass filtering entirely when the user has disabled it.
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)
        trimmed_tb = None
        try:
            return fn(*args, **kwargs)
        except Exception as exc:
            trimmed_tb = _process_traceback_frames(exc.__traceback__)
            # To get the full stack trace, call:
            # `keras.config.disable_traceback_filtering()`
            raise exc.with_traceback(trimmed_tb) from None
        finally:
            # Drop the local reference so traceback frames can be freed.
            del trimmed_tb

    return wrapper
def inject_argument_info_in_traceback(fn, object_name=None):
    """Add information about call argument values to an error message.

    Arguments:
        fn: Function to wrap. Exceptions raised by the this function will be
            re-raised with additional information added to the error message,
            displaying the values of the different arguments that the function
            was called with.
        object_name: String, display name of the class/function being called,
            e.g. `'layer "layer_name" (LayerClass)'`.

    Returns:
        A wrapped version of `fn`.
    """
    # TF errors carry extra state (node_def, op, error_code) and need
    # special handling when re-raising; only import when relevant.
    if backend.backend() == "tensorflow":
        from tensorflow import errors as tf_errors
    else:
        tf_errors = None
    @wraps(fn)
    def error_handler(*args, **kwargs):
        # Argument injection piggybacks on the traceback-filtering flag:
        # when filtering is off, exceptions pass through untouched.
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)
        signature = None
        bound_signature = None
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            if hasattr(e, "_keras_call_info_injected"):
                # Only inject info for the innermost failing call
                raise e
            signature = inspect.signature(fn)
            try:
                # The first argument is `self`, so filter it out
                # NOTE(review): `bind` raises `TypeError` when the call
                # arguments don't match `fn`'s signature.
                bound_signature = signature.bind(*args, **kwargs)
            except TypeError:
                # Likely unbindable arguments
                raise e
            # Add argument context
            # Build one "name=value" bullet per parameter; unbound
            # parameters are reported with their default value.
            arguments_context = []
            for arg in list(signature.parameters.values()):
                if arg.name in bound_signature.arguments:
                    value = tree.map_structure(
                        format_argument_value,
                        bound_signature.arguments[arg.name],
                    )
                else:
                    value = arg.default
                arguments_context.append(f"  • {arg.name}={value}")
            if arguments_context:
                arguments_context = "\n".join(arguments_context)
                # Get original error message and append information to it.
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    message = e.message
                elif e.args:
                    # Canonically, the 1st argument in an exception is the error
                    # message. This works for all built-in Python exceptions.
                    message = e.args[0]
                else:
                    message = ""
                display_name = f"{object_name if object_name else fn.__name__}"
                # ANSI bold escape codes highlight the original message.
                message = (
                    f"Exception encountered when calling {display_name}.\n\n"
                    f"\x1b[1m{message}\x1b[0m\n\n"
                    f"Arguments received by {display_name}:\n"
                    f"{arguments_context}"
                )
                # Reraise exception, with added context
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    new_e = e.__class__(e.node_def, e.op, message, e.error_code)
                else:
                    try:
                        # For standard exceptions such as ValueError, TypeError,
                        # etc.
                        new_e = e.__class__(message)
                    except TypeError:
                        # For any custom error that doesn't have a standard
                        # signature.
                        new_e = RuntimeError(message)
                # Mark the new exception so outer wrappers skip injection.
                new_e._keras_call_info_injected = True
            else:
                new_e = e
            # `from None` suppresses the implicit chained-exception noise.
            raise new_e.with_traceback(e.__traceback__) from None
        finally:
            # Release references to frames captured via the signature.
            del signature
            del bound_signature
    return error_handler
def format_argument_value(value):
    """Return a short, readable representation of a call argument."""
    if backend.is_tensor(value):
        # Represent tensors compactly (class name, shape, dtype) instead
        # of printing their full contents, to keep messages readable.
        display_names = {
            "tensorflow": "tf.Tensor",
            "jax": "jnp.ndarray",
            "torch": "torch.Tensor",
            "numpy": "np.ndarray",
        }
        tensor_cls = display_names.get(backend.backend(), "array")
        return (
            f"{tensor_cls}(shape={value.shape}, "
            f"dtype={backend.standardize_dtype(value.dtype)})"
        )
    return repr(value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/io_utils.py | keras/src/utils/io_utils.py | import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
    [
        "keras.config.enable_interactive_logging",
        "keras.utils.enable_interactive_logging",
    ]
)
def enable_interactive_logging():
    """Turn on interactive logging.

    With interactive logging enabled, Keras writes its log messages
    directly to stdout, which is the most convenient behavior when
    working in an interactive environment such as a shell or a notebook.
    """
    global_state.set_global_attribute("interactive_logging", True)
@keras_export(
    [
        "keras.config.disable_interactive_logging",
        "keras.utils.disable_interactive_logging",
    ]
)
def disable_interactive_logging():
    """Turn off interactive logging.

    With interactive logging disabled, Keras hands its log messages to
    `absl.logging` instead of writing them to stdout — the preferred
    setup for non-interactive workloads such as server-side training or
    inference jobs.
    """
    global_state.set_global_attribute("interactive_logging", False)
@keras_export(
    [
        "keras.config.is_interactive_logging_enabled",
        "keras.utils.is_interactive_logging_enabled",
    ]
)
def is_interactive_logging_enabled():
    """Return whether Keras currently logs interactively (to stdout).

    Use `keras.config.enable_interactive_logging()` and
    `keras.config.disable_interactive_logging()` to switch between
    writing logs to stdout and routing them to `absl.logging`.

    Returns:
        Boolean, `True` if interactive logging is enabled,
        and `False` otherwise.
    """
    # Interactive logging is the default unless explicitly disabled.
    return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
    """Sets the verbosity level for logging.

    Supported log levels are as follows:

    - `"FATAL"` (least verbose)
    - `"ERROR"`
    - `"WARNING"`
    - `"INFO"`
    - `"DEBUG"` (most verbose)

    Args:
        level: A string corresponding to the level of verbosity for logging.

    Raises:
        ValueError: If `level` is not one of the supported level names.
    """
    # Map the user-facing level names onto absl's numeric constants.
    valid_levels = {
        "FATAL": logging.FATAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }
    if level not in valid_levels:
        raise ValueError(
            "Please pass a valid level for logging verbosity. "
            f"Expected one of: {set(valid_levels.keys())}. "
            f"Received: {level}"
        )
    logging.set_verbosity(valid_levels[level])
def print_msg(message, line_break=True):
    """Print the message to absl logging or stdout.

    Args:
        message: The message to emit; coerced to `str`.
        line_break: Boolean, whether to append a trailing newline when
            writing to stdout. Defaults to `True`.
    """
    message = str(message)
    if is_interactive_logging_enabled():
        message = f"{message}\n" if line_break else message
        try:
            sys.stdout.write(message)
        except UnicodeEncodeError:
            # If the encoding differs from UTF-8, `sys.stdout.write` may fail.
            # To address this, replace special unicode characters in the
            # message, and then encode and decode using the target encoding.
            message = _replace_special_unicode_character(message)
            # Fallback to UTF-8 when `sys.stdout.encoding` is `None` (e.g. when
            # stdout is redirected). This prevents a `TypeError` that would be
            # raised by `bytes.encode(None)` / `bytes.decode(None)`.
            encoding = sys.stdout.encoding or "utf-8"
            # Round-trip through the target encoding, silently dropping any
            # characters it cannot represent, so the retry cannot fail again.
            message_bytes = message.encode(encoding, errors="ignore")
            message = message_bytes.decode(encoding)
            sys.stdout.write(message)
        sys.stdout.flush()
    else:
        logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
    """Produces a prompt asking about overwriting a file.

    Args:
        filepath: the path to the file to be overwritten.

    Returns:
        True if we can proceed with overwrite, False otherwise.
    """
    answer = (
        input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
        .strip()
        .lower()
    )
    # Keep prompting until the user gives an unambiguous answer.
    while answer not in ("y", "n"):
        answer = (
            input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
        )
    if answer == "y":
        print_msg("[TIP] Next time specify overwrite=True!")
        return True
    return False
def _replace_special_unicode_character(message):
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
return message
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/image_dataset_utils_test.py | keras/src/utils/image_dataset_utils_test.py | import os
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.utils import image_dataset_utils
from keras.src.utils import image_utils
from keras.src.utils.module_utils import tensorflow as tf
class ImageDatasetFromDirectoryTest(testing.TestCase):
    """Tests for `image_dataset_utils.image_dataset_from_directory`.

    Most tests are parameterized over the two supported dataset
    `format`s ("tf" and "grain"). Helpers below create temporary
    class-subdirectory trees populated with small random images.
    """
    def _get_images(self, count=16, color_mode="rgb"):
        """Return `count` random 24x24 PIL images in the given color mode."""
        width = height = 24
        imgs = []
        for _ in range(count):
            if color_mode == "grayscale":
                img = np.random.randint(0, 256, size=(height, width, 1))
            elif color_mode == "rgba":
                img = np.random.randint(0, 256, size=(height, width, 4))
            else:
                img = np.random.randint(0, 256, size=(height, width, 3))
            if backend.config.image_data_format() == "channels_first":
                img = np.transpose(img, (2, 0, 1))
            img = image_utils.array_to_img(img)
            imgs.append(img)
        return imgs
    def _prepare_directory(
        self,
        num_classes=2,
        nested_dirs=False,
        color_mode="rgb",
        count=16,
    ):
        """Create a temp directory tree of class subdirs filled with images.

        Images are distributed round-robin across the generated
        subdirectories. Returns the path of the temp directory.
        """
        # Generate paths to class subdirectories
        temp_dir = self.get_temp_dir()
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths
        # Save images to the paths
        i = 0
        for img in self._get_images(color_mode=color_mode, count=count):
            path = paths[i % len(paths)]
            if color_mode == "rgb":
                ext = "jpg"
            else:
                ext = "png"
            filename = os.path.join(path, f"image_{i}.{ext}")
            img.save(os.path.join(temp_dir, filename))
            i += 1
        return temp_dir
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_no_labels(self, format):
        # Test retrieving images without labels from a directory and its
        # subdirs.
        # Save a few extra images in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i, img in enumerate(self._get_images(3)):
            filename = f"image_{i}.jpg"
            img.save(os.path.join(directory, filename))
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            labels=None,
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        self.assertEqual(dataset.class_names, None)
        batch = next(iter(dataset))
        # We return plain images
        self.assertEqual(list(batch.shape), output_shape)
        self.assertDType(batch, "float32")
        # Count samples
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_binary(self, format):
        # Exercise "int", "binary" and "categorical" label modes on a
        # two-class directory.
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="int",
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        self.assertEqual(list(batch[1].shape), [8])
        self.assertDType(batch[1], "int32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="binary",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        self.assertEqual(list(batch[1].shape), [8, 1])
        self.assertDType(batch[1], "float32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        self.assertEqual(list(batch[1].shape), [8, 2])
        self.assertDType(batch[1], "float32")
    def test_static_shape_in_graph(self):
        # The static (symbolic) batch shape must be known inside a
        # tf.function trace; only applicable to the "tf" format.
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        test_case = self
        if backend.config.image_data_format() == "channels_last":
            output_shape = [None, 18, 18, 3]
        else:
            output_shape = [None, 3, 18, 18]
        @tf.function
        def symbolic_fn(ds):
            for x, _ in ds.take(1):
                test_case.assertListEqual(x.shape.as_list(), output_shape)
        symbolic_fn(dataset)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_sample_count(self, format):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            format=format,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_multiclass(self, format):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertEqual(list(batch.shape), output_shape)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            format=format,
        )
        sample_count = 0
        # NOTE(review): batches are consumed through a second, explicit
        # iterator while the `for` loop drives iteration — presumably to
        # check that the dataset supports concurrent iterators; confirm
        # intent before simplifying.
        iterator = iter(dataset)
        for batch in dataset:
            sample_count += next(iterator).shape[0]
        self.assertEqual(sample_count, 15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="int",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        self.assertEqual(list(batch[1].shape), [8])
        self.assertDType(batch[1], "int32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        self.assertEqual(list(batch[1].shape), [8, 4])
        self.assertDType(batch[1], "float32")
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_color_modes(self, format):
        directory = self._prepare_directory(num_classes=4, color_mode="rgba")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            color_mode="rgba",
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 4]
        else:
            output_shape = [8, 4, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
        directory = self._prepare_directory(
            num_classes=4, color_mode="grayscale"
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            color_mode="grayscale",
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 1]
        else:
            output_shape = [8, 1, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
        self.assertDType(batch[0], "float32")
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_validation_split(self, format):
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="training",
            seed=1337,
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if backend.config.image_data_format() == "channels_last":
            train_output_shape = [8, 18, 18, 3]
            val_output_shape = [2, 18, 18, 3]
        else:
            train_output_shape = [8, 3, 18, 18]
            val_output_shape = [2, 3, 18, 18]
        self.assertEqual(list(batch[0].shape), train_output_shape)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="validation",
            seed=1337,
            format=format,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), val_output_shape)
        (
            train_dataset,
            val_dataset,
        ) = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="both",
            seed=1337,
            format=format,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), train_output_shape)
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), val_output_shape)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_manual_labels(self, format):
        # Case: wrong number of labels
        directory = self._prepare_directory(num_classes=1, count=4)
        with self.assertRaisesRegex(ValueError, "match the number of files"):
            image_dataset_utils.image_dataset_from_directory(
                directory,
                batch_size=8,
                image_size=(18, 18),
                labels=[0, 1, 0],
                shuffle=False,
                format=format,
            )
        # Case: single directory
        directory = self._prepare_directory(num_classes=1, count=4)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1],
            shuffle=False,
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [18, 18, 3]
        else:
            output_shape = [3, 18, 18]
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), [4] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1])
        # Case: multiple directories
        directory = self._prepare_directory(num_classes=3, count=6)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1, 1, 1],
            shuffle=False,
            format=format,
        )
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), [6] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1, 1, 1])
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_follow_links(self, format):
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            format=format,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 25)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_no_images(self, format):
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(ValueError, "No images found."):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, format=format
            )
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_crop_to_aspect_ratio(self, format):
        directory = self._prepare_directory(num_classes=2, count=5)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            crop_to_aspect_ratio=True,
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_pad_to_aspect_ratio(self, format):
        directory = self._prepare_directory(num_classes=2, count=5)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            pad_to_aspect_ratio=True,
            format=format,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(list(batch[0].shape), output_shape)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_errors(self, format):
        # Each invalid-argument combination must raise a descriptive
        # ValueError.
        directory = self._prepare_directory(num_classes=3, count=5)
        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels="other", format=format
            )
        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="other", format=format
            )
        with self.assertRaisesRegex(ValueError, "`color_mode` must be one of"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, color_mode="other", format=format
            )
        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
                format=format,
            )
        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels=[0, 0, 1, 1], format=format
            )
        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"], format=format
            )
        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="binary", format=format
            )
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=2, format=format
            )
        with self.assertRaisesRegex(
            ValueError,
            '`subset` must be either "training", "validation" or "both"',
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.2, subset="other", format=format
            )
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory,
                validation_split=0.0,
                subset="training",
                format=format,
            )
        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory,
                validation_split=0.2,
                subset="training",
                format=format,
            )
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_not_batched(self, format):
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=None,
            image_size=(18, 18),
            label_mode=None,
            shuffle=False,
            format=format,
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 3)
    @parameterized.named_parameters(
        ("tf", "tf"),
        ("grain", "grain"),
    )
    def test_image_dataset_from_directory_shuffle(self, format):
        # TODO: add same test for train/val
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=False,
            format=format,
        )
        batches_1 = []
        batches_2 = []
        for b in dataset:
            batches_1.append(ops.convert_to_numpy(b))
        batches_1 = np.concatenate(batches_1, axis=0)
        for b in dataset:
            batches_2.append(ops.convert_to_numpy(b))
        batches_2 = np.concatenate(batches_2, axis=0)
        self.assertAllClose(batches_1, batches_2, atol=1e-6)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=True,
            seed=1337,
            format=format,
        )
        batches_1 = []
        batches_2 = []
        for b in dataset:
            batches_1.append(ops.convert_to_numpy(b))
        batches_1 = np.concatenate(batches_1, axis=0)
        for b in dataset:
            batches_2.append(ops.convert_to_numpy(b))
        batches_2 = np.concatenate(batches_2, axis=0)
        if format == "tf":
            self.assertNotAllClose(batches_1, batches_2, atol=1e-6)
        else:
            # Grain shuffles deterministically, so we expect the same batches.
            self.assertAllClose(batches_1, batches_2, atol=1e-6)
        # Test random seed determinism
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=True,
            seed=1337,
            format=format,
        )
        batches_1_alt = []
        for b in dataset:
            batches_1_alt.append(ops.convert_to_numpy(b))
        batches_1_alt = np.concatenate(batches_1_alt, axis=0)
        self.assertAllClose(batches_1, batches_1_alt, atol=1e-6)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/config.py | keras/src/utils/config.py | import copy
import json
try:
import difflib
except ImportError:
difflib = None
from keras.src.api_export import keras_export
@keras_export("keras.utils.Config")
class Config:
"""A Config is a dict-like container for named values.
It offers a few advantages over a plain dict:
- Setting and retrieving values via attribute setting / getting.
- Ability to freeze the config to ensure no accidental config modifications
occur past a certain point in your program.
- Easy serialization of the whole config as JSON.
Examples:
```python
# Create a config via constructor arguments
config = Config("learning_rate"=0.1, "momentum"=0.9)
# Then keep adding to it via attribute-style setting
config.use_ema = True
config.ema_overwrite_frequency = 100
# You can also add attributes via dict-like access
config["seed"] = 123
# You can retrieve entries both via attribute-style
# access and dict-style access
assert config.seed == 100
assert config["learning_rate"] == 0.1
```
A config behaves like a dict:
```python
config = Config("learning_rate"=0.1, "momentum"=0.9)
for k, v in config.items():
print(f"{k}={v}")
print(f"keys: {list(config.keys())}")
print(f"values: {list(config.values())}")
```
In fact, it can be turned into one:
```python
config = Config("learning_rate"=0.1, "momentum"=0.9)
dict_config = config.as_dict()
```
You can easily serialize a config to JSON:
```python
config = Config("learning_rate"=0.1, "momentum"=0.9)
json_str = config.to_json()
```
You can also freeze a config to prevent further changes:
```python
config = Config()
config.optimizer = "adam"
config.seed = 123
# Freeze the config to prevent changes.
config.freeze()
assert config.frozen
config.foo = "bar" # This will raise an error.
```
"""
__attrs__ = None
def __init__(self, **kwargs):
self._config = kwargs
self._frozen = False
self.__attrs__ = set(dir(self))
@property
def frozen(self):
"""Returns True if the config is frozen."""
return self._frozen
def freeze(self):
"""Marks the config as frozen, preventing any ulterior modification."""
self._frozen = True
def unfreeze(self):
self._frozen = False
def _raise_if_frozen(self):
if self._frozen:
raise ValueError(
"Cannot mutate attribute(s) because the config is frozen."
)
def as_dict(self):
return copy.copy(self._config)
def to_json(self):
return json.dumps(self._config)
def keys(self):
return self._config.keys()
def values(self):
return self._config.values()
def items(self):
return self._config.items()
def pop(self, *args):
self._raise_if_frozen()
return self._config.pop(*args)
def update(self, *args, **kwargs):
self._raise_if_frozen()
return self._config.update(*args, **kwargs)
def get(self, keyname, value=None):
return self._config.get(keyname, value)
def __setattr__(self, name, value):
attrs = object.__getattribute__(self, "__attrs__")
if attrs is None or name in attrs:
return object.__setattr__(self, name, value)
self._raise_if_frozen()
self._config[name] = value
def __getattr__(self, name):
attrs = object.__getattribute__(self, "__attrs__")
if attrs is None or name in attrs:
return object.__getattribute__(self, name)
if name in self._config:
return self._config[name]
msg = f"Unknown attribute: '{name}'."
if difflib is not None:
closest_matches = difflib.get_close_matches(
name, self._config.keys(), n=1, cutoff=0.7
)
if closest_matches:
msg += f" Did you mean '{closest_matches[0]}'?"
raise AttributeError(msg)
def __setitem__(self, key, item):
self._raise_if_frozen()
self._config[key] = item
def __getitem__(self, key):
return self._config[key]
def __repr__(self):
return f"<Config {self._config}>"
def __iter__(self):
keys = sorted(self._config.keys())
for k in keys:
yield k
def __len__(self):
return len(self._config)
def __delitem__(self, key):
self._raise_if_frozen()
del self._config[key]
def __contains__(self, item):
return item in self._config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/__init__.py | keras/src/utils/__init__.py | from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.python_utils import default
from keras.src.utils.python_utils import is_default
from keras.src.utils.python_utils import removeprefix
from keras.src.utils.python_utils import removesuffix
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/python_utils.py | keras/src/utils/python_utils.py | import binascii
import codecs
import marshal
import os
import types as python_types
def is_continuous_axis(axis):
    """Check whether `axis` denotes a contiguous run of dimensions.

    An axis specification is considered continuous if it is a single
    integer, a length-1 sequence, or a sequence of consecutive integers
    in either ascending (e.g. `(1, 2, 3)`) or descending
    (e.g. `(3, 2, 1)`) order.

    Args:
        axis: An int, or a sequence of ints.

    Returns:
        True if the axis is continuous, False otherwise.
    """
    if isinstance(axis, int) or len(axis) == 1:
        return True
    # Ascending run: every successive difference is +1.
    positive_order_flag = all(
        axis[i + 1] - axis[i] == 1 for i in range(len(axis) - 1)
    )
    # Descending run: every successive difference is -1.
    # (Bug fix: the original repeated the `!= 1` check here, which made
    # the descending case unreachable and rejected axes like (3, 2, 1).)
    negative_order_flag = all(
        axis[i + 1] - axis[i] == -1 for i in range(len(axis) - 1)
    )
    return positive_order_flag or negative_order_flag
def default(method):
    """Decorates a method to detect overrides in subclasses."""
    # Tag the function so `is_default` can recognize it later; the
    # function object itself is returned unchanged.
    setattr(method, "_is_default", True)
    return method
def is_default(method):
    """Check if a method is decorated with the `default` wrapper."""
    # Functions without the marker attribute count as overrides.
    marked = getattr(method, "_is_default", False)
    return marked
def func_dump(func):
"""Serializes a user-defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode("ascii"), "base64")
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode("raw_unicode_escape")
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure
)
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
Args:
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
Returns:
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
def removeprefix(x, prefix):
"""Backport of `removeprefix` from PEP-616 (Python 3.9+)"""
if len(prefix) > 0 and x.startswith(prefix):
return x[len(prefix) :]
else:
return x
def removesuffix(x, suffix):
"""Backport of `removesuffix` from PEP-616 (Python 3.9+)"""
if len(suffix) > 0 and x.endswith(suffix):
return x[: -len(suffix)]
else:
return x
def remove_by_id(lst, value):
"""Remove a value from a list by id."""
for i, v in enumerate(lst):
if id(v) == id(value):
del lst[i]
return
def pythonify_logs(logs):
"""Flatten and convert log values to Python-native types.
This function attempts to convert dict value by `float(value)` and skips
the conversion if it fails.
Args:
logs: A dict containing log values.
Returns:
A flattened dict with values converted to Python-native types if
possible.
"""
from keras.src import backend
logs = logs or {}
result = {}
for key, value in sorted(logs.items()):
if isinstance(value, dict):
result.update(pythonify_logs(value))
else:
try:
# Prevent torch compiler from breaking the graph.
if backend.is_tensor(value):
value = backend.convert_to_numpy(value)
value = float(value)
except:
pass
result[key] = value
return result
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/jax_utils.py | keras/src/utils/jax_utils.py | from keras.src import backend
def is_in_jax_tracing_scope(x=None):
if backend.backend() == "jax":
if x is None:
x = backend.numpy.ones(())
for c in x.__class__.__mro__:
if c.__name__ == "Tracer" and c.__module__.startswith("jax"):
return True
return False
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/jax_layer_test.py | keras/src/utils/jax_layer_test.py | import math
import os
import jax
import jax.numpy as jnp
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import random
from keras.src import saving
from keras.src import testing
from keras.src import tree
from keras.src import utils
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.saving import object_registration
from keras.src.utils.jax_layer import FlaxLayer
from keras.src.utils.jax_layer import JaxLayer
try:
import flax
except ImportError:
flax = None
num_classes = 10
input_shape = (28, 28, 1) # Excluding batch_size
@object_registration.register_keras_serializable()
def jax_stateless_init(rng, inputs):
layer_sizes = [784, 300, 100, 10]
params = []
w_init = jax.nn.initializers.glorot_normal()
b_init = jax.nn.initializers.normal(0.1)
for m, n in zip(layer_sizes[:-1], layer_sizes[1:]):
rng, w_rng = jax.random.split(rng)
rng, b_rng = jax.random.split(rng)
params.append([w_init(w_rng, (m, n)), b_init(b_rng, (n,))])
return params
@object_registration.register_keras_serializable()
def jax_stateless_apply(params, inputs):
activations = inputs.reshape((inputs.shape[0], -1)) # flatten
for w, b in params[:-1]:
outputs = jnp.dot(activations, w) + b
activations = jnp.tanh(outputs)
final_w, final_b = params[-1]
logits = jnp.dot(activations, final_w) + final_b
return jax.nn.softmax(logits, axis=-1)
@object_registration.register_keras_serializable()
def jax_stateful_init(rng, inputs, training):
params = jax_stateless_init(rng, inputs)
state = jnp.zeros([], jnp.int32)
return params, state
@object_registration.register_keras_serializable()
def jax_stateful_apply(params, state, inputs, training):
outputs = jax_stateless_apply(params, inputs)
if training:
state = state + 1
return outputs, state
if flax is not None:
@object_registration.register_keras_serializable()
class FlaxTrainingIndependentModel(flax.linen.Module):
@flax.linen.compact
def forward(self, inputs):
x = inputs
x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x)
x = flax.linen.relu(x)
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = flax.linen.Conv(features=64, kernel_size=(3, 3))(x)
x = flax.linen.relu(x)
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = flax.linen.Dense(features=200)(x)
x = flax.linen.relu(x)
x = flax.linen.Dense(features=10)(x)
x = flax.linen.softmax(x)
return x
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
@object_registration.register_keras_serializable()
class FlaxDropoutModel(flax.linen.Module):
@flax.linen.compact
def my_apply(self, inputs, training):
x = inputs
x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x)
x = flax.linen.relu(x)
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = flax.linen.Conv(features=64, kernel_size=(3, 3))(x)
x = flax.linen.relu(x)
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = flax.linen.Dense(features=200)(x)
x = flax.linen.Dropout(rate=0.3, deterministic=not training)(x)
x = flax.linen.relu(x)
x = flax.linen.Dense(features=10)(x)
x = flax.linen.softmax(x)
return x
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
@object_registration.register_keras_serializable()
def flax_dropout_wrapper(module, x, training):
return module.my_apply(x, training)
@object_registration.register_keras_serializable()
class FlaxBatchNormModel(flax.linen.Module):
@flax.linen.compact
def __call__(self, inputs, training=False):
ura = not training
x = inputs
x = flax.linen.Conv(
features=12, kernel_size=(3, 3), use_bias=False
)(x)
x = flax.linen.BatchNorm(use_running_average=ura, use_scale=False)(
x
)
x = flax.linen.relu(x)
x = flax.linen.Conv(
features=24, kernel_size=(6, 6), strides=(2, 2)
)(x)
x = flax.linen.BatchNorm(use_running_average=ura, use_scale=False)(
x
)
x = flax.linen.relu(x)
x = flax.linen.Conv(
features=32, kernel_size=(6, 6), strides=(2, 2)
)(x)
x = flax.linen.BatchNorm(use_running_average=ura, use_scale=False)(
x
)
x = x.reshape((x.shape[0], -1)) # flatten
x = flax.linen.Dense(features=200, use_bias=True)(x)
x = flax.linen.BatchNorm(use_running_average=ura, use_scale=False)(
x
)
x = flax.linen.Dropout(rate=0.3, deterministic=not training)(x)
x = flax.linen.relu(x)
x = flax.linen.Dense(features=10)(x)
x = flax.linen.softmax(x)
return x
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
FLAX_OBJECTS = {
"FlaxTrainingIndependentModel": FlaxTrainingIndependentModel,
"FlaxBatchNormModel": FlaxBatchNormModel,
"FlaxDropoutModel": FlaxDropoutModel,
"flax_dropout_wrapper": flax_dropout_wrapper,
}
@pytest.mark.skipif(
backend.backend() not in ["jax", "tensorflow"],
reason="JaxLayer and FlaxLayer are only supported with JAX and TF backend",
)
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="GPU test failure")
class TestJaxLayer(testing.TestCase):
def _test_layer(
self,
model_name,
layer_class,
layer_init_kwargs,
trainable_weights,
trainable_params,
non_trainable_weights,
non_trainable_params,
):
# Fake MNIST data
x_train = random.uniform(shape=(320, 28, 28, 1))
y_train_indices = ops.cast(
ops.random.uniform(shape=(320,), minval=0, maxval=num_classes),
dtype="int32",
)
y_train = ops.one_hot(y_train_indices, num_classes, dtype="int32")
x_test = random.uniform(shape=(32, 28, 28, 1))
def _count_params(weights):
count = 0
for weight in weights:
count = count + math.prod(ops.shape(weight))
return count
def verify_weights_and_params(layer):
self.assertEqual(trainable_weights, len(layer.trainable_weights))
self.assertEqual(
trainable_params,
_count_params(layer.trainable_weights),
)
self.assertEqual(
non_trainable_weights, len(layer.non_trainable_weights)
)
self.assertEqual(
non_trainable_params,
_count_params(layer.non_trainable_weights),
)
# functional model
layer1 = layer_class(**layer_init_kwargs)
inputs1 = layers.Input(shape=input_shape)
outputs1 = layer1(inputs1)
model1 = models.Model(
inputs=inputs1, outputs=outputs1, name=f"{model_name}1"
)
model1.summary()
verify_weights_and_params(layer1)
model1.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=[metrics.CategoricalAccuracy()],
)
tw1_before_fit = tree.map_structure(
backend.convert_to_numpy, layer1.trainable_weights
)
ntw1_before_fit = tree.map_structure(
backend.convert_to_numpy, layer1.non_trainable_weights
)
model1.fit(x_train, y_train, epochs=1, steps_per_epoch=10)
tw1_after_fit = tree.map_structure(
backend.convert_to_numpy, layer1.trainable_weights
)
ntw1_after_fit = tree.map_structure(
backend.convert_to_numpy, layer1.non_trainable_weights
)
# verify both trainable and non-trainable weights did change after fit
for before, after in zip(tw1_before_fit, tw1_after_fit):
self.assertNotAllClose(before, after)
for before, after in zip(ntw1_before_fit, ntw1_after_fit):
self.assertNotAllClose(before, after)
expected_ouput_shape = (ops.shape(x_test)[0], num_classes)
output1 = model1(x_test)
self.assertEqual(output1.shape, expected_ouput_shape)
predict1 = model1.predict(x_test, steps=1)
self.assertEqual(predict1.shape, expected_ouput_shape)
# verify both trainable and non-trainable weights did not change
tw1_after_call = tree.map_structure(
backend.convert_to_numpy, layer1.trainable_weights
)
ntw1_after_call = tree.map_structure(
backend.convert_to_numpy, layer1.non_trainable_weights
)
for after_fit, after_call in zip(tw1_after_fit, tw1_after_call):
self.assertAllClose(after_fit, after_call)
for after_fit, after_call in zip(ntw1_after_fit, ntw1_after_call):
self.assertAllClose(after_fit, after_call)
exported_params = jax.tree_util.tree_map(
backend.convert_to_numpy, layer1.params
)
if layer1.state is not None:
exported_state = jax.tree_util.tree_map(
backend.convert_to_numpy, layer1.state
)
else:
exported_state = None
def verify_identical_model(model):
output = model(x_test)
self.assertAllClose(output1, output)
predict = model.predict(x_test, steps=1)
self.assertAllClose(predict1, predict)
# sequential model to compare results
layer2 = layer_class(
params=exported_params,
state=exported_state,
input_shape=input_shape,
**layer_init_kwargs,
)
model2 = models.Sequential([layer2], name=f"{model_name}2")
model2.summary()
verify_weights_and_params(layer2)
model2.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=[metrics.CategoricalAccuracy()],
)
verify_identical_model(model2)
# save, load back and compare results
path = os.path.join(self.get_temp_dir(), "jax_layer_model.keras")
model2.save(path)
model3 = saving.load_model(path)
layer3 = model3.layers[0]
model3.summary()
verify_weights_and_params(layer3)
verify_identical_model(model3)
# export, load back and compare results
path = os.path.join(self.get_temp_dir(), "jax_layer_export")
model2.export(path, format="tf_saved_model")
model4 = tf.saved_model.load(path)
output4 = model4.serve(x_test)
# The output difference is greater when using the GPU or bfloat16
lower_precision = testing.jax_uses_gpu() or "dtype" in layer_init_kwargs
self.assertAllClose(
output1,
output4,
atol=1e-2 if lower_precision else 1e-6,
rtol=1e-3 if lower_precision else 1e-6,
)
# test subclass model building without a build method
class TestModel(models.Model):
def __init__(self, layer):
super().__init__()
self._layer = layer
def call(self, inputs):
return self._layer(inputs)
layer5 = layer_class(**layer_init_kwargs)
model5 = TestModel(layer5)
output5 = model5(x_test)
self.assertNotAllClose(output5, 0.0)
@parameterized.named_parameters(
{
"testcase_name": "training_independent",
"init_kwargs": {
"call_fn": jax_stateless_apply,
"init_fn": jax_stateless_init,
},
"trainable_weights": 6,
"trainable_params": 266610,
"non_trainable_weights": 0,
"non_trainable_params": 0,
},
{
"testcase_name": "training_state",
"init_kwargs": {
"call_fn": jax_stateful_apply,
"init_fn": jax_stateful_init,
},
"trainable_weights": 6,
"trainable_params": 266610,
"non_trainable_weights": 1,
"non_trainable_params": 1,
},
{
"testcase_name": "training_state_dtype_policy",
"init_kwargs": {
"call_fn": jax_stateful_apply,
"init_fn": jax_stateful_init,
"dtype": DTypePolicy("mixed_float16"),
},
"trainable_weights": 6,
"trainable_params": 266610,
"non_trainable_weights": 1,
"non_trainable_params": 1,
},
)
def test_jax_layer(
self,
init_kwargs,
trainable_weights,
trainable_params,
non_trainable_weights,
non_trainable_params,
):
self._test_layer(
init_kwargs["call_fn"].__name__,
JaxLayer,
init_kwargs,
trainable_weights,
trainable_params,
non_trainable_weights,
non_trainable_params,
)
@parameterized.named_parameters(
{
"testcase_name": "training_independent_bound_method",
"flax_model_class": "FlaxTrainingIndependentModel",
"flax_model_method": "forward",
"init_kwargs": {},
"trainable_weights": 8,
"trainable_params": 648226,
"non_trainable_weights": 0,
"non_trainable_params": 0,
},
{
"testcase_name": "training_rng_unbound_method",
"flax_model_class": "FlaxDropoutModel",
"flax_model_method": None,
"init_kwargs": {
"method": "flax_dropout_wrapper",
},
"trainable_weights": 8,
"trainable_params": 648226,
"non_trainable_weights": 0,
"non_trainable_params": 0,
},
{
"testcase_name": "training_rng_state_no_method",
"flax_model_class": "FlaxBatchNormModel",
"flax_model_method": None,
"init_kwargs": {},
"trainable_weights": 13,
"trainable_params": 354258,
"non_trainable_weights": 8,
"non_trainable_params": 536,
},
{
"testcase_name": "training_rng_unbound_method_dtype_policy",
"flax_model_class": "FlaxDropoutModel",
"flax_model_method": None,
"init_kwargs": {
"method": "flax_dropout_wrapper",
"dtype": DTypePolicy("mixed_float16"),
},
"trainable_weights": 8,
"trainable_params": 648226,
"non_trainable_weights": 0,
"non_trainable_params": 0,
},
)
@pytest.mark.skipif(flax is None, reason="Flax library is not available.")
def test_flax_layer(
self,
flax_model_class,
flax_model_method,
init_kwargs,
trainable_weights,
trainable_params,
non_trainable_weights,
non_trainable_params,
):
flax_model_class = FLAX_OBJECTS.get(flax_model_class)
if "method" in init_kwargs:
init_kwargs["method"] = FLAX_OBJECTS.get(init_kwargs["method"])
def create_wrapper(**kwargs):
params = kwargs.pop("params") if "params" in kwargs else None
state = kwargs.pop("state") if "state" in kwargs else None
if params and state:
variables = {**params, **state}
elif params:
variables = params
elif state:
variables = state
else:
variables = None
kwargs["variables"] = variables
flax_model = flax_model_class()
if flax_model_method:
kwargs["method"] = getattr(flax_model, flax_model_method)
return FlaxLayer(flax_model_class(), **kwargs)
self._test_layer(
flax_model_class.__name__,
create_wrapper,
init_kwargs,
trainable_weights,
trainable_params,
non_trainable_weights,
non_trainable_params,
)
def test_with_no_init_fn_and_no_params(self):
def jax_fn(params, inputs):
return inputs
with self.assertRaises(ValueError):
JaxLayer(jax_fn)
def test_with_training_in_call_fn_but_not_init_fn(self):
def jax_call_fn(params, state, rng, inputs, training):
return inputs, {}
def jax_init_fn(rng, inputs):
return {}, {}
layer = JaxLayer(jax_call_fn, jax_init_fn)
layer(np.ones((1,)))
def test_with_different_argument_order(self):
def jax_call_fn(training, inputs, rng, state, params):
return inputs, {}
def jax_init_fn(training, inputs, rng):
return {}, {}
layer = JaxLayer(jax_call_fn, jax_init_fn)
layer(np.ones((1,)))
def test_with_minimal_arguments(self):
def jax_call_fn(inputs):
return inputs
def jax_init_fn(inputs):
return {}
layer = JaxLayer(jax_call_fn, jax_init_fn)
layer(np.ones((1,)))
def test_with_missing_inputs_in_call_fn(self):
def jax_call_fn(params, rng, training):
return jnp.ones((1,))
def jax_init_fn(rng, inputs):
return {}
with self.assertRaisesRegex(ValueError, "`call_fn`.*`inputs`"):
JaxLayer(jax_call_fn, jax_init_fn)
def test_with_missing_inputs_in_init_fn(self):
def jax_call_fn(params, rng, inputs, training):
return jnp.ones((1,))
def jax_init_fn(rng, training):
return {}
with self.assertRaisesRegex(ValueError, "`init_fn`.*`inputs`"):
JaxLayer(jax_call_fn, jax_init_fn)
def test_with_unsupported_argument_in_call_fn(self):
def jax_call_fn(params, rng, inputs, mode):
return jnp.ones((1,))
def jax_init_fn(rng, inputs):
return {}
with self.assertRaisesRegex(ValueError, "`call_fn`.*`mode`"):
JaxLayer(jax_call_fn, jax_init_fn)
def test_with_unsupported_argument_in_init_fn(self):
def jax_call_fn(params, rng, inputs, training):
return inputs
def jax_init_fn(rng, inputs, mode):
return {}
with self.assertRaisesRegex(ValueError, "`init_fn`.*`mode`"):
JaxLayer(jax_call_fn, jax_init_fn)
def test_with_structures_as_inputs_and_outputs(self):
def jax_fn(params, inputs):
a = inputs["a"]
b = inputs["b"]
output1 = jnp.concatenate([a, b], axis=1)
output2 = jnp.concatenate([b, a], axis=1)
return output1, output2
layer = JaxLayer(jax_fn, params={})
inputs = {
"a": layers.Input((None, 3)),
"b": layers.Input((None, 3)),
}
outputs = layer(inputs)
model = models.Model(inputs, outputs)
test_inputs = {
"a": np.ones((2, 6, 3)),
"b": np.ones((2, 7, 3)),
}
test_outputs = model(test_inputs)
self.assertAllClose(test_outputs[0], np.ones((2, 13, 3)))
self.assertAllClose(test_outputs[1], np.ones((2, 13, 3)))
def test_with_polymorphic_shape_more_than_26_dimension_names(self):
def jax_fn(params, inputs):
return jnp.concatenate(inputs, axis=1)
layer = JaxLayer(jax_fn, params=())
inputs = [layers.Input((None, 3)) for _ in range(60)]
output = layer(inputs)
model = models.Model(inputs, output)
test_inputs = [np.ones((2, 1, 3))] * 60
test_output = model(test_inputs)
self.assertAllClose(test_output, np.ones((2, 60, 3)))
@pytest.mark.skipif(flax is None, reason="Flax library is not available.")
def test_with_flax_state_no_params(self):
class MyFlaxLayer(flax.linen.Module):
@flax.linen.compact
def __call__(self, x):
def zeros_init(shape):
return jnp.zeros(shape, jnp.int32)
count = self.variable("a", "b", zeros_init, [])
count.value = count.value + 1
return x
layer = FlaxLayer(MyFlaxLayer(), variables={"a": {"b": 0}})
layer(np.ones((1,)))
self.assertLen(layer.params, 0)
self.assertEqual(layer.state["a"]["b"].value, 1)
def test_with_state_none_leaves(self):
def jax_fn(params, state, inputs):
return inputs, state
layer = JaxLayer(jax_fn, state={"foo": None})
self.assertIsNone(layer.state["foo"])
layer(np.ones((1,)))
def test_with_state_non_tensor_leaves(self):
def jax_fn(params, state, inputs):
return inputs, state
layer = JaxLayer(jax_fn, state={"foo": "bar"})
self.assertEqual(layer.state["foo"], "bar")
# layer cannot be invoked as jax2tf will fail on strings
def test_with_state_jax_registered_node_class(self):
@jax.tree_util.register_pytree_node_class
class NamedPoint:
def __init__(self, x, y, name):
self.x = x
self.y = y
self.name = name
def tree_flatten(self):
return ((self.x, self.y), self.name)
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(*children, aux_data)
def jax_fn(params, state, inputs):
return inputs, state
layer = JaxLayer(jax_fn, state=[NamedPoint(1.0, 2.0, "foo")])
layer(np.ones((1,)))
@parameterized.named_parameters(
{
"testcase_name": "sequence_instead_of_mapping",
"init_state": [0.0],
"error_regex": "Expected dict, got ",
},
{
"testcase_name": "mapping_instead_of_sequence",
"init_state": {"state": {"foo": 0.0}},
"error_regex": "Expected list, got ",
},
{
"testcase_name": "sequence_instead_of_variable",
"init_state": {"state": [[0.0]]},
"error_regex": "Structure mismatch",
},
{
"testcase_name": "no_initial_state",
"init_state": None,
"error_regex": "Expected dict, got None",
},
{
"testcase_name": "missing_dict_key",
"init_state": {"state": {}},
"error_regex": "Expected list, got ",
},
{
"testcase_name": "missing_variable_in_list",
"init_state": {"state": {"foo": [2.0]}},
"error_regex": "Expected list, got ",
},
)
def test_state_mismatch_during_update(self, init_state, error_regex):
def jax_fn(params, state, inputs):
return inputs, {"state": [jnp.ones([])]}
layer = JaxLayer(jax_fn, params={}, state=init_state)
with self.assertRaisesRegex(ValueError, error_regex):
layer(np.ones((1,)))
def test_rng_seeding(self):
def jax_init(rng, inputs):
return [jax.nn.initializers.normal(1.0)(rng, inputs.shape)]
def jax_apply(params, inputs):
return jnp.dot(inputs, params[0])
shape = (2, 2)
utils.set_random_seed(0)
layer1 = JaxLayer(jax_apply, jax_init)
layer1.build(shape)
utils.set_random_seed(0)
layer2 = JaxLayer(jax_apply, jax_init)
layer2.build(shape)
self.assertAllClose(layer1.params[0], layer2.params[0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/io_utils_test.py | keras/src/utils/io_utils_test.py | import sys
import tempfile
from unittest.mock import patch
from keras.src.testing import test_case
from keras.src.utils import io_utils
class TestIoUtils(test_case.TestCase):
def test_enable_interactive_logging(self):
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
def test_disable_interactive_logging(self):
io_utils.disable_interactive_logging()
self.assertFalse(io_utils.is_interactive_logging_enabled())
def test_set_logging_verbosity_valid(self):
valid_levels = ["FATAL", "ERROR", "WARNING", "INFO", "DEBUG"]
for level in valid_levels:
io_utils.set_logging_verbosity(level)
def test_set_logging_verbosity_invalid(self):
with self.assertRaises(ValueError):
io_utils.set_logging_verbosity("INVALID")
@patch("builtins.input", side_effect=["y"])
def test_ask_to_proceed_with_overwrite_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["n"])
def test_ask_to_proceed_with_overwrite_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("sys.stdout.write")
def test_print_msg_interactive_with_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=True)
mock_write.assert_called_once_with("Hello\n")
@patch("sys.stdout.write")
def test_print_msg_interactive_without_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=False)
mock_write.assert_called_once_with("Hello")
@patch("absl.logging.info")
def test_print_msg_non_interactive(self, mock_logging):
io_utils.disable_interactive_logging()
io_utils.print_msg("Hello")
mock_logging.assert_called_once_with("Hello")
@patch("builtins.input", side_effect=["invalid", "invalid", "y"])
def test_ask_to_proceed_with_overwrite_invalid_then_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["invalid", "n"])
def test_ask_to_proceed_with_overwrite_invalid_then_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
def test_print_msg_with_different_encoding(self):
# https://github.com/keras-team/keras/issues/19386
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
ori_stdout = sys.stdout
with tempfile.TemporaryFile(mode="w", encoding="cp1251") as tmp:
sys.stdout = tmp
io_utils.print_msg("━")
sys.stdout = ori_stdout
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/timeseries_dataset_utils_test.py | keras/src/utils/timeseries_dataset_utils_test.py | import numpy as np
from keras.src import testing
from keras.src.utils import timeseries_dataset_utils
class TimeseriesDatasetTest(testing.TestCase):
def test_basics(self):
# Test ordering, targets, sequence length, batch size
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5
)
# Expect 19 batches
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 18:
self.assertEqual(inputs.shape, (5, 9))
if i == 18:
# Last batch: size 2
self.assertEqual(inputs.shape, (2, 9))
# Check target values
self.assertAllClose(targets, inputs[:, 0] * 2)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
self.assertAllClose(
inputs[j], np.arange(i * 5 + j, i * 5 + j + 9)
)
def test_timeseries_regression(self):
# Test simple timeseries regression use case
data = np.arange(10)
offset = 3
targets = data[offset:]
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=offset, batch_size=1
)
i = 0
for batch in dataset:
self.assertLen(batch, 2)
inputs, targets = batch
self.assertEqual(inputs.shape, (1, 3))
# Check values
self.assertAllClose(targets[0], data[offset + i])
self.assertAllClose(inputs[0], data[i : i + offset])
i += 1
self.assertEqual(i, 7) # Expect 7 batches
def test_no_targets(self):
data = np.arange(50)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=10, batch_size=5
)
# Expect 9 batches
i = None
for i, batch in enumerate(dataset):
if i < 8:
self.assertEqual(batch.shape, (5, 10))
elif i == 8:
self.assertEqual(batch.shape, (1, 10))
for j in range(min(5, len(batch))):
# Check each sample in the batch
self.assertAllClose(
batch[j], np.arange(i * 5 + j, i * 5 + j + 10)
)
self.assertEqual(i, 8)
def test_shuffle(self):
# Test cross-epoch random order and seed determinism
data = np.arange(10)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
first_seq = None
for x, y in dataset.take(1):
self.assertNotAllClose(x, np.arange(0, 5))
self.assertAllClose(x[:, 0] * 2, y)
first_seq = x
# Check that a new iteration with the same dataset yields different
# results
for x, _ in dataset.take(1):
self.assertNotAllClose(x, first_seq)
# Check determinism with same seed
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
for x, _ in dataset.take(1):
self.assertAllClose(x, first_seq)
def test_sampling_rate(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sampling_rate=2
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 16:
self.assertEqual(inputs.shape, (5, 9))
if i == 16:
# Last batch: size 4
self.assertEqual(inputs.shape, (4, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 + j
end_index = start_index + 9 * 2
self.assertAllClose(
inputs[j], np.arange(start_index, end_index, 2)
)
def test_sequence_stride(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sequence_stride=3
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 6:
self.assertEqual(inputs.shape, (5, 9))
if i == 6:
# Last batch: size 1
self.assertEqual(inputs.shape, (1, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 * 3 + j * 3
end_index = start_index + 9
self.assertAllClose(
inputs[j], np.arange(start_index, end_index)
)
def test_start_and_end_index(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
None,
sequence_length=9,
batch_size=5,
sequence_stride=3,
sampling_rate=2,
start_index=10,
end_index=90,
)
for batch in dataset:
self.assertLess(np.max(batch[0]), 90)
self.assertGreater(np.min(batch[0]), 9)
def test_errors(self):
# bad start index
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=-1
)
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=11
)
# bad end index
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=-1
)
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=11
)
# bad sampling_rate
with self.assertRaisesRegex(ValueError, "`sampling_rate` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sampling_rate=0
)
# bad sequence stride
with self.assertRaisesRegex(ValueError, "`sequence_stride` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sequence_stride=0
)
def test_not_batched(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=9, batch_size=None, shuffle=True
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/dtype_utils.py | keras/src/utils/dtype_utils.py | from keras.src import backend
from keras.src import ops
# Bit width of every supported dtype name. Note "bool" is counted as 1 bit.
DTYPE_TO_SIZE = {
    **{f"float{bits}": bits for bits in (16, 32, 64)},
    **{f"int{bits}": bits for bits in (8, 16, 32, 64)},
    **{f"uint{bits}": bits for bits in (8, 16, 32, 64)},
    "bfloat16": 16,
    "bool": 1,
}


def dtype_size(dtype):
    """Return the size in bits of `dtype`.

    Args:
        dtype: A dtype name string, e.g. `"float32"`.

    Returns:
        The bit width as an int.

    Raises:
        ValueError: If `dtype` is not a known dtype name.
    """
    if dtype not in DTYPE_TO_SIZE:
        raise ValueError(f"Invalid dtype: {dtype}")
    return DTYPE_TO_SIZE[dtype]
def is_float(dtype):
    """Return True if `dtype` names a floating type (incl. "bfloat16")."""
    return dtype.find("float") != -1
def cast_to_common_dtype(tensors):
    """Cast a list of tensors to a common dtype.

    If any tensor is floating-point, they will all be cast to the
    most-precise floating-point dtype present. Mixing `"float16"` and
    `"bfloat16"` (same bit width but different exponent/mantissa split)
    promotes to `"float32"`, regardless of the order in which the two
    dtypes appear. If no tensor is floating-point, the tensors are not
    cast.

    Args:
        tensors: A list of tensors.

    Returns:
        Same list, cast to a common dtype.
    """
    highest_float = None
    # Impossible size so the first float encountered always wins.
    highest_float_size = -1
    for x in tensors:
        dtype = backend.standardize_dtype(x.dtype)
        if not is_float(dtype):
            continue
        if highest_float is None or dtype_size(dtype) > highest_float_size:
            highest_float = dtype
            highest_float_size = dtype_size(dtype)
        elif {dtype, highest_float} == {"float16", "bfloat16"}:
            # Neither 16-bit float format can represent the other; promote
            # to float32. The original code only handled float16 arriving
            # after bfloat16, making the result order-dependent.
            highest_float = "float32"
            highest_float_size = dtype_size(highest_float)
    if highest_float:
        tensors = [ops.cast(x, highest_float) for x in tensors]
    return tensors
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/summary_utils_test.py | keras/src/utils/summary_utils_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase):
    """Tests for `summary_utils.print_summary` output content."""

    @parameterized.parameters([("adam",), (None,)])
    @pytest.mark.requires_trainable_backend
    def test_print_model_summary(self, optimizer):
        """Summary reports param counts, with optimizer slots when built."""
        inputs = layers.Input((2,))
        outputs = layers.Dense(3)(inputs)
        model = models.Model(inputs, outputs)
        model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
        if optimizer:
            # Trigger the optimizer weights creation
            model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
        summary_content = []

        def print_to_variable(text, line_break=False):
            # Capture summary lines instead of printing to stdout.
            summary_content.append(text)

        try:
            summary_utils.print_summary(model, print_fn=print_to_variable)
            summary_content = "\n".join(summary_content)
            if optimizer:
                # 29 total = presumably 9 Dense params + 20 Adam variables;
                # verify against the optimizer implementation if it changes.
                self.assertIn("Total params: 29", summary_content)
                self.assertIn("Trainable params: 9", summary_content)
                self.assertIn("Non-trainable params: 0", summary_content)
                self.assertIn("Optimizer params: 20", summary_content)
            else:
                self.assertIn("Total params: 9", summary_content)
                self.assertIn("Trainable params: 9", summary_content)
                self.assertIn("Non-trainable params: 0", summary_content)
                self.assertNotIn("Optimizer params", summary_content)
        except ImportError:
            # NOTE(review): print_summary may raise ImportError (presumably
            # for an optional rendering dependency); the test then no-ops.
            pass

    def test_print_model_summary_custom_build(self):
        """Layers built in a custom `build()` show shapes; unbuilt show `?`."""

        class MyModel(models.Model):
            def __init__(self):
                super().__init__()
                self.dense1 = layers.Dense(4, activation="relu")
                self.dense2 = layers.Dense(2, activation="softmax")
                # Deliberately never built, to exercise the "?" shape path.
                self.unbuilt_dense = layers.Dense(1)

            def build(self, input_shape):
                self.dense1.build(input_shape)
                input_shape = self.dense1.compute_output_shape(input_shape)
                self.dense2.build(input_shape)

            def call(self, inputs):
                x = self.dense1(inputs)
                return self.dense2(x)

        model = MyModel()
        model.build((None, 2))
        summary_content = []

        def print_to_variable(text, line_break=False):
            summary_content.append(text)

        summary_utils.print_summary(model, print_fn=print_to_variable)
        summary_content = "\n".join(summary_content)
        self.assertIn("(None, 4)", summary_content)  # dense1 output shape
        self.assertIn("(None, 2)", summary_content)  # dense2 output shape
        self.assertIn("?", summary_content)  # unbuilt_dense has no shape
        self.assertIn("Total params: 22", summary_content)
        self.assertIn("Trainable params: 22", summary_content)
        self.assertIn("Non-trainable params: 0", summary_content)

    def test_print_model_summary_op_as_layer(self):
        """Raw ops in a functional graph appear as rows in the summary."""
        inputs = layers.Input((2,))
        x = layers.Dense(4)(inputs)
        outputs = ops.mean(x)
        model = models.Model(inputs, outputs)
        summary_content = []

        def print_to_variable(text, line_break=False):
            summary_content.append(text)

        summary_utils.print_summary(
            model, print_fn=print_to_variable, show_trainable=True
        )
        summary_content = "\n".join(summary_content)
        self.assertIn("(None, 4)", summary_content)  # dense output shape
        self.assertIn("Y", summary_content)  # dense trainable flag
        self.assertIn("()", summary_content)  # mean output (scalar)
        self.assertIn("-", summary_content)  # mean has no trainable flag
        self.assertIn("Total params: 12", summary_content)
        self.assertIn("Trainable params: 12", summary_content)
        self.assertIn("Non-trainable params: 0", summary_content)

    def test_print_model_summary_with_mha(self):
        """MultiHeadAttention output shape is reported correctly."""
        # In Keras <= 3.6, MHA exposes `output_shape` property which breaks this
        # test.

        class MyModel(models.Model):
            def __init__(self):
                super().__init__()
                self.mha = layers.MultiHeadAttention(2, 2, output_shape=(4,))

            def call(self, inputs):
                return self.mha(inputs, inputs, inputs)

        model = MyModel()
        model(np.ones((1, 2, 2)))
        summary_content = []

        def print_to_variable(text, line_break=False):
            summary_content.append(text)

        summary_utils.print_summary(model, print_fn=print_to_variable)
        summary_content = "\n".join(summary_content)
        self.assertIn("(1, 2, 4)", summary_content)  # mha output shape
        self.assertIn("Total params: 56", summary_content)
        self.assertIn("Trainable params: 56", summary_content)
        self.assertIn("Non-trainable params: 0", summary_content)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/progbar.py | keras/src/utils/progbar.py | import math
import os
import sys
import time
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils import io_utils
@keras_export("keras.utils.Progbar")
class Progbar:
    """Displays a progress bar.

    Args:
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that should *not*
            be averaged over time. Metrics in this list will be displayed as-is.
            All others will be averaged by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
        unit_name: Display name for step counts (usually "step" or "sample").
    """

    def __init__(
        self,
        target,
        width=20,
        verbose=1,
        interval=0.05,
        stateful_metrics=None,
        unit_name="step",
    ):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        self.unit_name = unit_name
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # Whether the output stream supports in-place redrawing of the bar
        # (via carriage return / backspaces) rather than printing new lines.
        self._dynamic_display = (
            (hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
            or "ipykernel" in sys.modules
            or "posix" in sys.modules
            or "PYCHARM_HOSTED" in os.environ
        )
        self._seen_so_far = 0
        # We use a dict + list to avoid garbage collection
        # issues found in OrderedDict
        self._values = {}
        self._values_order = []
        self._start = time.time()
        self._last_update = 0
        self._time_at_epoch_start = self._start
        self._time_after_first_step = None
        # Visible width of the previously printed line, used to erase and
        # pad over leftovers when redrawing in place.
        self._prev_total_width = 0

    def update(self, current, values=None, finalize=None):
        """Updates the progress bar.

        Args:
            current: Index of current step.
            values: List of tuples: `(name, value_for_last_step)`. If `name` is
                in `stateful_metrics`, `value_for_last_step` will be displayed
                as-is. Else, an average of the metric over time will be
                displayed.
            finalize: Whether this is the last update for the progress bar. If
                `None`, defaults to `current >= self.target`.
        """
        if finalize is None:
            if self.target is None:
                finalize = False
            else:
                finalize = current >= self.target
        values = values or []
        for k, v in values:
            if k not in self._values_order:
                self._values_order.append(k)
            if k not in self.stateful_metrics:
                # In the case that progress bar doesn't have a target value in
                # the first epoch, both on_batch_end and on_epoch_end will be
                # called, which will cause 'current' and 'self._seen_so_far' to
                # have the same value. Force the minimal value to 1 here,
                # otherwise stateful_metric will be 0s.
                if finalize:
                    self._values[k] = [v, 1]
                else:
                    # Weight the value by the number of steps since the last
                    # update so the running average stays correct.
                    value_base = max(current - self._seen_so_far, 1)
                    if k not in self._values:
                        self._values[k] = [v * value_base, value_base]
                    else:
                        self._values[k][0] += v * value_base
                        self._values[k][1] += value_base
            else:
                # Stateful metrics output a numeric value. This representation
                # means "take an average from a single value" but keeps the
                # numeric formatting.
                self._values[k] = [v, 1]
        self._seen_so_far = current
        message = ""
        # Number of non-printing characters (ANSI escape codes) added to the
        # line; subtracted later so width math uses the *visible* length.
        special_char_len = 0
        now = time.time()
        time_per_unit = self._estimate_step_duration(current, now)
        if self.verbose == 1:
            if now - self._last_update < self.interval and not finalize:
                return
            if self._dynamic_display:
                message += "\b" * self._prev_total_width
                message += "\r"
            else:
                message += "\n"
            if self.target is not None:
                numdigits = int(math.log10(self.target)) + 1
                bar = (f"%{numdigits}d/%d") % (current, self.target)
                # Bold "current/target" counter: \x1b[1m + \x1b[0m = 8 chars.
                bar = f"\x1b[1m{bar}\x1b[0m "
                special_char_len += 8
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    # Green segment for completed progress: 9 escape chars.
                    bar += f"\33[32m{'━' * prog_width}\x1b[0m"
                    special_char_len += 9
                # White segment for remaining progress.
                bar += f"\33[37m{'━' * (self.width - prog_width)}\x1b[0m"
                special_char_len += 9
            else:
                bar = "%7d/Unknown" % current
            message += bar
            # Add ETA if applicable
            if self.target is not None and not finalize:
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = "%d:%02d:%02d" % (
                        eta // 3600,
                        (eta % 3600) // 60,
                        eta % 60,
                    )
                elif eta > 60:
                    eta_format = "%d:%02d" % (eta // 60, eta % 60)
                else:
                    eta_format = "%ds" % eta
                info = f" \x1b[1m{eta_format}\x1b[0m"
            else:
                # Time elapsed since start, in seconds
                info = f" \x1b[1m{now - self._start:.0f}s\x1b[0m"
            # Both branches above wrap the text in bold escapes (8 chars).
            special_char_len += 8
            # Add time/step
            info += self._format_time(time_per_unit, self.unit_name)
            # Add metrics
            for k in self._values_order:
                info += f" - {k}:"
                if isinstance(self._values[k], list):
                    values, count = self._values[k]
                    if not isinstance(values, float):
                        values = np.mean(values)
                    avg = values / max(1, count)
                    if abs(avg) > 1e-3:
                        info += f" {avg:.4f}"
                    else:
                        info += f" {avg:.4e}"
                else:
                    info += f" {self._values[k]}"
            message += info
            # Visible width of this line; pad with spaces if the previous
            # line was longer so no stale characters remain.
            total_width = len(bar) + len(info) - special_char_len
            if self._prev_total_width > total_width:
                message += " " * (self._prev_total_width - total_width)
            if finalize:
                message += "\n"
            io_utils.print_msg(message, line_break=False)
            self._prev_total_width = total_width
            message = ""
        elif self.verbose == 2:
            # Semi-verbose mode only prints a single summary line at the end.
            if finalize:
                # NOTE(review): assumes `self.target` is not None here
                # (log10 would fail otherwise) — confirm callers guarantee it.
                numdigits = int(math.log10(self.target)) + 1
                count = f"%{numdigits}d/%d" % (current, self.target)
                info = f"{count} - {now - self._start:.0f}s"
                info += f" -{self._format_time(time_per_unit, self.unit_name)}"
                for k in self._values_order:
                    info += f" - {k}:"
                    values, count = self._values[k]
                    if not isinstance(values, float):
                        values = np.mean(values)
                    avg = values / max(1, count)
                    # NOTE(review): uses `avg > 1e-3` here vs `abs(avg)` in
                    # verbose=1, so negative averages print in scientific
                    # notation only in this mode — possibly unintended.
                    if avg > 1e-3:
                        info += f" {avg:.4f}"
                    else:
                        info += f" {avg:.4e}"
                info += "\n"
                message += info
                io_utils.print_msg(message, line_break=False)
                message = ""
        self._last_update = now

    def add(self, n, values=None):
        """Advances the bar by `n` steps, optionally updating metrics."""
        self.update(self._seen_so_far + n, values)

    def _format_time(self, time_per_unit, unit_name):
        """format a given duration to display to the user.

        Given the duration, this function formats it in either milliseconds
        or seconds and displays the unit (i.e. ms/step or s/epoch).

        Args:
            time_per_unit: the duration to display
            unit_name: the name of the unit to display

        Returns:
            A string with the correctly formatted duration and units
        """
        formatted = ""
        if time_per_unit >= 1 or time_per_unit == 0:
            formatted += f" {time_per_unit:.0f}s/{unit_name}"
        elif time_per_unit >= 1e-3:
            formatted += f" {time_per_unit * 1000.0:.0f}ms/{unit_name}"
        else:
            formatted += f" {time_per_unit * 1000000.0:.0f}us/{unit_name}"
        return formatted

    def _estimate_step_duration(self, current, now):
        """Estimate the duration of a single step.

        Given the step number `current` and the corresponding time `now` this
        function returns an estimate for how long a single step takes. If this
        is called before one step has been completed (i.e. `current == 0`) then
        zero is given as an estimate. The duration estimate ignores the duration
        of the (assumed to be non-representative) first step for estimates when
        more steps are available (i.e. `current>1`).

        Args:
            current: Index of current step.
            now: The current time.

        Returns: Estimate of the duration of a single step.
        """
        if current:
            # there are a few special scenarios here:
            # 1) somebody is calling the progress bar without ever supplying
            # step 1
            # 2) somebody is calling the progress bar and supplies step one
            # multiple times, e.g. as part of a finalizing call
            # in these cases, we just fall back to the simple calculation
            if self._time_after_first_step is not None and current > 1:
                time_per_unit = (now - self._time_after_first_step) / (
                    current - 1
                )
            else:
                time_per_unit = (now - self._start) / current
            if current == 1:
                self._time_after_first_step = now
            return time_per_unit
        else:
            return 0
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/text_dataset_utils.py | keras/src/utils/text_dataset_utils.py | import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils import dataset_utils
from keras.src.utils.grain_utils import make_string_batch
from keras.src.utils.module_utils import grain
from keras.src.utils.module_utils import tensorflow as tf
@keras_export(
    [
        "keras.utils.text_dataset_from_directory",
        "keras.preprocessing.text_dataset_from_directory",
    ]
)
def text_dataset_from_directory(
    directory,
    labels="inferred",
    label_mode="int",
    class_names=None,
    batch_size=32,
    max_length=None,
    shuffle=True,
    seed=None,
    validation_split=None,
    subset=None,
    follow_links=False,
    format="tf",
    verbose=True,
):
    """Generates a dataset from text files in a directory.

    If your directory structure is:

    ```
    main_directory/
    ...class_a/
    ......a_text_1.txt
    ......a_text_2.txt
    ...class_b/
    ......b_text_1.txt
    ......b_text_2.txt
    ```

    Then calling `text_dataset_from_directory(main_directory,
    labels='inferred')` will return a dataset that yields batches of
    texts from the subdirectories `class_a` and `class_b`, together with labels
    0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

    Only `.txt` files are supported at this time.

    By default, this function will return a `tf.data.Dataset` object. You can
    set `format="grain"` to return a `grain.IterDataset` object instead, which
    removes the TensorFlow dependency.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing text files for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number of
            text files found in the directory. Labels should be sorted according
            to the alphanumeric order of the text file paths
            (obtained via `os.walk(directory)` in Python).
        label_mode: String describing the encoding of `labels`. Options are:
            - `"int"`: means that the labels are encoded as integers
                (e.g. for `sparse_categorical_crossentropy` loss).
            - `"categorical"` means that the labels are
                encoded as a categorical vector
                (e.g. for `categorical_crossentropy` loss).
            - `"binary"` means that the labels (there can be only 2)
                are encoded as `float32` scalars with values 0 or 1
                (e.g. for `binary_crossentropy`).
            - `None` (no labels).
        class_names: Only valid if `"labels"` is `"inferred"`.
            This is the explicit list of class names
            (must match names of subdirectories). Used to control the order
            of the classes (otherwise alphanumerical order is used).
        batch_size: Size of the batches of data.
            If `None`, the data will not be batched
            (the dataset will yield individual samples).
            Defaults to `32`.
        max_length: Maximum size of a text string. Texts longer than this will
            be truncated to `max_length`.
        shuffle: Whether to shuffle the data.
            If set to `False`, sorts the data in alphanumeric order.
            Defaults to `True`.
        seed: Optional random seed for shuffling and transformations.
        validation_split: Optional float between 0 and 1,
            fraction of data to reserve for validation.
        subset: Subset of the data to return.
            One of `"training"`, `"validation"` or `"both"`.
            Only used if `validation_split` is set.
            When `subset="both"`, the utility returns a tuple of two datasets
            (the training and validation datasets respectively).
        follow_links: Whether to visits subdirectories pointed to by symlinks.
            Defaults to `False`.
        format: The format of the return object. Defaults to `"tf"`. Available
            options are:
            - `"tf"`: returns a `tf.data.Dataset` object. Requires
                TensorFlow to be installed.
            - `"grain"`: returns a `grain.IterDataset` object. Requires
                Grain to be installed.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.

    Returns:

    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
    (`format="grain"`) object.

    When `format="tf"`:
    - If `label_mode` is `None`, it yields `string` tensors of shape
      `(batch_size,)`, containing the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
      has shape `(batch_size,)` and `labels` follows the format described
      below.

    When `format="grain"`:
    - If `label_mode` is `None`, it yields a list of Python strings containing
      the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
      is a list of Python strings and `labels` follows the format described
      below.

    Rules regarding labels format:

    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
      `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
      1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
      of shape `(batch_size, num_classes)`, representing a one-hot
      encoding of the class index.
    """
    # --- Argument validation ---
    if labels not in ("inferred", None):
        if not isinstance(labels, (list, tuple)):
            raise ValueError(
                "`labels` argument should be a list/tuple of integer labels, "
                "of the same size as the number of text files in the target "
                "directory. If you wish to infer the labels from the "
                "subdirectory names in the target directory, "
                'pass `labels="inferred"`. '
                "If you wish to get a dataset that only contains text samples "
                f"(no labels), pass `labels=None`. Received: labels={labels}"
            )
        if class_names:
            raise ValueError(
                "You can only pass `class_names` if "
                f'`labels="inferred"`. Received: labels={labels}, and '
                f"class_names={class_names}"
            )
    if label_mode not in {"int", "categorical", "binary", None}:
        raise ValueError(
            '`label_mode` argument must be one of "int", '
            '"categorical", "binary", '
            f"or None. Received: label_mode={label_mode}"
        )
    if format not in ("tf", "grain"):
        raise ValueError(
            '`format` should be either "tf" or "grain". '
            f"Received: format={format}"
        )
    # No labels and no label_mode imply each other.
    if labels is None or label_mode is None:
        labels = None
        label_mode = None
    dataset_utils.check_validation_split_arg(
        validation_split, subset, shuffle, seed
    )
    # Materialize a concrete seed so that shuffling/splitting is consistent
    # across the two datasets produced when subset="both".
    if seed is None:
        seed = np.random.randint(1e6)
    file_paths, labels, class_names = dataset_utils.index_directory(
        directory,
        labels,
        formats=(".txt",),
        class_names=class_names,
        shuffle=shuffle,
        seed=seed,
        follow_links=follow_links,
        verbose=verbose,
    )
    if label_mode == "binary" and len(class_names) != 2:
        raise ValueError(
            'When passing `label_mode="binary"`, there must be exactly 2 '
            f"class_names. Received: class_names={class_names}"
        )
    # Heuristic shuffle buffer: 8 batches' worth when batching, else 1024.
    if batch_size is not None:
        shuffle_buffer_size = batch_size * 8
    else:
        shuffle_buffer_size = 1024
    if subset == "both":
        # Build the training and validation datasets from the same split.
        (
            file_paths_train,
            labels_train,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "training"
        )
        (
            file_paths_val,
            labels_val,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "validation"
        )
        if not file_paths_train:
            raise ValueError(
                f"No training text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        if not file_paths_val:
            raise ValueError(
                f"No validation text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        train_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_train,
            labels=labels_train,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
            format=format,
        )
        # The validation dataset is never shuffled.
        val_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_val,
            labels=labels_val,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=False,
            format=format,
        )
        if format == "tf":
            if batch_size is not None:
                train_dataset = train_dataset.batch(batch_size)
                val_dataset = val_dataset.batch(batch_size)
            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
        else:
            train_dataset = train_dataset.to_iter_dataset()
            val_dataset = val_dataset.to_iter_dataset()
            if batch_size is not None:
                train_dataset = train_dataset.batch(
                    batch_size, batch_fn=make_string_batch
                )
                val_dataset = val_dataset.batch(
                    batch_size, batch_fn=make_string_batch
                )
        # Users may need to reference `class_names`.
        train_dataset.class_names = class_names
        val_dataset.class_names = class_names
        dataset = [train_dataset, val_dataset]
    else:
        file_paths, labels = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, subset
        )
        if not file_paths:
            raise ValueError(
                f"No text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        dataset = paths_and_labels_to_dataset(
            file_paths=file_paths,
            labels=labels,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
            format=format,
        )
        if format == "tf":
            if batch_size is not None:
                dataset = dataset.batch(batch_size)
            dataset = dataset.prefetch(tf.data.AUTOTUNE)
        else:
            dataset = dataset.to_iter_dataset()
            if batch_size is not None:
                dataset = dataset.batch(batch_size, batch_fn=make_string_batch)
        # Users may need to reference `class_names`.
        dataset.class_names = class_names
    return dataset
def paths_and_labels_to_dataset(
    file_paths,
    labels,
    label_mode,
    num_classes,
    max_length,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
    format="tf",
):
    """Constructs a dataset of text strings and labels.

    Args:
        file_paths: List of paths to `.txt` files.
        labels: Labels aligned with `file_paths`, or `None`.
        label_mode: One of `"int"`, `"categorical"`, `"binary"` or `None`.
        num_classes: Number of classes (used for categorical labels).
        max_length: If not `None`, file contents are truncated to this
            many characters.
        shuffle: Whether to shuffle the dataset.
        shuffle_buffer_size: Shuffle buffer size (only used by the tf
            backend).
        seed: Random seed used when shuffling.
        format: `"tf"` for a `tf.data.Dataset`, `"grain"` for a grain
            dataset.

    Returns:
        The constructed dataset.

    Raises:
        ValueError: If `format` is not `"tf"` or `"grain"`.
    """
    if format == "tf":
        return _paths_and_labels_to_dataset_tf(
            file_paths,
            labels,
            label_mode,
            num_classes,
            max_length,
            shuffle,
            shuffle_buffer_size,
            seed,
        )
    elif format == "grain":
        return _paths_and_labels_to_dataset_grain(
            file_paths,
            labels,
            label_mode,
            num_classes,
            max_length,
            shuffle,
            shuffle_buffer_size,
            seed,
        )
    else:
        # Previously an unrecognized format fell through and silently
        # returned `None`; fail loudly instead, matching the message used
        # by `text_dataset_from_directory`.
        raise ValueError(
            '`format` should be either "tf" or "grain". '
            f"Received: format={format}"
        )
def _paths_and_labels_to_dataset_tf(
    file_paths,
    labels,
    label_mode,
    num_classes,
    max_length,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a `tf.data.Dataset` of text strings and labels."""
    ds = tf.data.Dataset.from_tensor_slices(file_paths)
    if label_mode:
        labels_ds = dataset_utils.labels_to_dataset_tf(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((ds, labels_ds))
    if shuffle:
        ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed)
    # Load file contents lazily, in parallel.
    if label_mode:
        ds = ds.map(
            lambda path, label: (
                _path_to_string_content_tf(path, max_length),
                label,
            ),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    else:
        ds = ds.map(
            lambda path: _path_to_string_content_tf(path, max_length),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    return ds
def _path_to_string_content_tf(path, max_length):
    """Read the file at `path` as a string tensor, optionally truncated."""
    contents = tf.io.read_file(path)
    if max_length is None:
        return contents
    return tf.strings.substr(contents, 0, max_length)
def _paths_and_labels_to_dataset_grain(
    file_paths,
    labels,
    label_mode,
    num_classes,
    max_length,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a `grain.MapDataset` of text strings and labels."""
    ds = grain.MapDataset.source(file_paths)
    if label_mode:
        labels_ds = dataset_utils.labels_to_dataset_grain(
            labels, label_mode, num_classes
        )
        ds = grain.experimental.ZipMapDataset([ds, labels_ds])
    if shuffle:
        ds = ds.shuffle(seed=seed)
    if label_mode:

        def _load_pair(data):
            # data is (path, label); replace the path with file contents.
            return (
                _path_to_string_content_grain(data[0], max_length),
                data[1],
            )

        ds = ds.map(_load_pair)
    else:
        ds = ds.map(
            lambda path: _path_to_string_content_grain(path, max_length)
        )
    return ds
def _path_to_string_content_grain(path, max_length):
with open(path, "r") as f:
txt = f.read()
if max_length is not None:
txt = txt[:max_length]
return txt
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/model_visualization.py | keras/src/utils/model_visualization.py | """Utilities related to model visualization."""
import os
import sys
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.utils import io_utils
try:
import pydot
except ImportError:
# pydot_ng and pydotplus are older forks of pydot
# which may still be used by some users
try:
import pydot_ng as pydot
except ImportError:
try:
import pydotplus as pydot
except ImportError:
pydot = None
def check_pydot():
    """Returns True if PyDot is available.

    `pydot` is resolved at import time and may be `pydot`, `pydot_ng`
    or `pydotplus`; it is `None` when none of them could be imported.
    """
    return pydot is not None
def check_graphviz():
    """Returns True if both PyDot and Graphviz are available."""
    if not check_pydot():
        return False
    try:
        # Rendering an empty graph exercises the Graphviz binaries and
        # therefore validates the pydot/graphviz installation.
        pydot.Dot.create(pydot.Dot())
    except (OSError, pydot.PydotException):
        return False
    return True
def add_edge(dot, src, dst):
    """Add a bold edge from `src` to `dst` (keyed by id), skipping dupes."""
    source_id = str(id(src))
    dest_id = str(id(dst))
    if dot.get_edge(source_id, dest_id):
        return
    edge = pydot.Edge(source_id, dest_id)
    edge.set("penwidth", "2")
    dot.add_edge(edge)
def get_layer_activation_name(layer):
    """Best-effort human-readable name for `layer.activation`.

    Prefers a `name` attribute, then `__name__` (plain functions), and
    finally falls back to `str()`.
    """
    activation = layer.activation
    for attr in ("name", "__name__"):
        if hasattr(activation, attr):
            return getattr(activation, attr)
    return str(activation)
def make_layer_label(layer, **kwargs):
    """Build the Graphviz HTML-like label string describing `layer`.

    Args:
        layer: The layer to describe.
        **kwargs: Exactly the boolean display flags `show_layer_names`,
            `show_layer_activations`, `show_dtype`, `show_shapes` and
            `show_trainable`. Any other key raises a `ValueError`.

    Returns:
        An HTML-like table label (wrapped in `<<table>...>`).

    Raises:
        ValueError: If an unexpected keyword argument is passed.
    """
    class_name = layer.__class__.__name__
    show_layer_names = kwargs.pop("show_layer_names")
    show_layer_activations = kwargs.pop("show_layer_activations")
    show_dtype = kwargs.pop("show_dtype")
    show_shapes = kwargs.pop("show_shapes")
    show_trainable = kwargs.pop("show_trainable")
    if kwargs:
        raise ValueError(f"Invalid kwargs: {kwargs}")
    table = (
        '<<table border="0" cellborder="1" bgcolor="black" cellpadding="10">'
    )
    # The header row must span all info columns that may appear below it.
    colspan_max = sum(int(x) for x in (show_dtype, show_trainable))
    if show_shapes:
        colspan_max += 2
    colspan = max(1, colspan_max)
    if show_layer_names:
        table += (
            f'<tr><td colspan="{colspan}" bgcolor="black">'
            '<font point-size="16" color="white">'
            f"<b>{layer.name}</b> ({class_name})"
            "</font></td></tr>"
        )
    else:
        table += (
            f'<tr><td colspan="{colspan}" bgcolor="black">'
            '<font point-size="16" color="white">'
            f"<b>{class_name}</b>"
            "</font></td></tr>"
        )
    if (
        show_layer_activations
        and hasattr(layer, "activation")
        and layer.activation is not None
    ):
        table += (
            f'<tr><td bgcolor="white" colspan="{colspan}">'
            '<font point-size="14">'
            f"Activation: <b>{get_layer_activation_name(layer)}</b>"
            "</font></td></tr>"
        )
    cols = []
    if show_shapes:
        input_shape = None
        output_shape = None
        try:
            input_shape = tree.map_structure(lambda x: x.shape, layer.input)
            output_shape = tree.map_structure(lambda x: x.shape, layer.output)
        except (ValueError, AttributeError):
            # Some layers (e.g. subclassed ones) have no symbolic
            # inputs/outputs; `format_shape` renders "?" instead.
            pass

        def format_shape(shape):
            # Render a shape structure, stripping dict braces for display.
            if shape is not None:
                if isinstance(shape, dict):
                    shape_str = ", ".join(
                        [f"{k}: {v}" for k, v in shape.items()]
                    )
                else:
                    shape_str = f"{shape}"
                shape_str = shape_str.replace("}", "").replace("{", "")
            else:
                shape_str = "?"
            return shape_str

        if class_name != "InputLayer":
            cols.append(
                (
                    '<td bgcolor="white"><font point-size="14">'
                    f"Input shape: <b>{format_shape(input_shape)}</b>"
                    "</font></td>"
                )
            )
        cols.append(
            (
                '<td bgcolor="white"><font point-size="14">'
                f"Output shape: <b>{format_shape(output_shape)}</b>"
                "</font></td>"
            )
        )
    if show_dtype:
        dtype = None
        try:
            dtype = tree.map_structure(lambda x: x.dtype, layer.output)
        except (ValueError, AttributeError):
            pass
        cols.append(
            (
                '<td bgcolor="white"><font point-size="14">'
                f"Output dtype: <b>{dtype or '?'}</b>"
                "</font></td>"
            )
        )
    if show_trainable and hasattr(layer, "trainable") and layer.weights:
        if layer.trainable:
            cols.append(
                (
                    '<td bgcolor="forestgreen">'
                    '<font point-size="14" color="white">'
                    "<b>Trainable</b></font></td>"
                )
            )
        else:
            cols.append(
                (
                    '<td bgcolor="firebrick">'
                    '<font point-size="14" color="white">'
                    "<b>Non-trainable</b></font></td>"
                )
            )
    # The original code recomputed `colspan` from `len(cols)` here, but the
    # value was never read again; the dead assignment has been removed.
    if cols:
        table += f"<tr>{''.join(cols)}</tr>"
    table += "</table>>"
    return table
def make_node(layer, **kwargs):
    """Build the pydot node for `layer`, keyed by its `id()`."""
    label = make_layer_label(layer, **kwargs)
    node = pydot.Node(str(id(layer)), label=label)
    for attr, value in (
        ("fontname", "Helvetica"),
        ("border", "0"),
        ("margin", "0"),
    ):
        node.set(attr, value)
    return node
@keras_export("keras.utils.model_to_dot")
def model_to_dot(
model,
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir="TB",
expand_nested=False,
dpi=200,
subgraph=False,
show_layer_activations=False,
show_trainable=False,
**kwargs,
):
"""Convert a Keras model to dot format.
Args:
model: A Keras model instance.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: `"TB"`
creates a vertical plot; `"LR"` creates a horizontal plot.
expand_nested: whether to expand nested Functional models
into clusters.
dpi: Image resolution in dots per inch.
subgraph: whether to return a `pydot.Cluster` instance.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable.
Returns:
A `pydot.Dot` instance representing the Keras model or
a `pydot.Cluster` instance representing nested model if
`subgraph=True`.
"""
from keras.src.ops.function import make_node_key
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
from keras.src.models import functional
from keras.src.models import sequential
# from keras.src.layers import Wrapper
if not check_pydot():
raise ImportError(
"You must install pydot (`pip install pydot`) for "
"model_to_dot to work."
)
if subgraph:
dot = pydot.Cluster(style="dashed", graph_name=model.name)
dot.set("label", model.name)
dot.set("labeljust", "l")
else:
dot = pydot.Dot()
dot.set("rankdir", rankdir)
dot.set("concentrate", True)
dot.set("dpi", dpi)
dot.set("splines", "ortho")
dot.set_node_defaults(shape="record")
if kwargs.pop("layer_range", None) is not None:
raise ValueError("Argument `layer_range` is no longer supported.")
if kwargs:
raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
kwargs = {
"show_layer_names": show_layer_names,
"show_layer_activations": show_layer_activations,
"show_dtype": show_dtype,
"show_shapes": show_shapes,
"show_trainable": show_trainable,
}
if isinstance(model, sequential.Sequential):
layers = model.layers
elif not isinstance(model, functional.Functional):
# We treat subclassed models as a single node.
node = make_node(model, **kwargs)
dot.add_node(node)
return dot
else:
layers = model._operations
# Create graph nodes.
for i, layer in enumerate(layers):
# Process nested functional and sequential models.
if expand_nested and isinstance(
layer, (functional.Functional, sequential.Sequential)
):
submodel = model_to_dot(
layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
dot.add_subgraph(submodel)
else:
node = make_node(layer, **kwargs)
dot.add_node(node)
# Connect nodes with edges.
if isinstance(model, sequential.Sequential):
if not expand_nested:
# Single Sequential case.
for i in range(len(layers) - 1):
add_edge(dot, layers[i], layers[i + 1])
return dot
else:
# The first layer is connected to the `InputLayer`, which is not
# represented for Sequential models, so we skip it. What will draw
# the incoming edge from outside of the sequential model is the
# edge connecting the Sequential model itself.
layers = model.layers[1:]
# Functional and nested Sequential case.
for layer in layers:
# Go from current layer to input `Node`s.
for inbound_index, inbound_node in enumerate(layer._inbound_nodes):
# `inbound_node` is a `Node`.
if (
isinstance(model, functional.Functional)
and make_node_key(layer, inbound_index) not in model._nodes
):
continue
# Go from input `Node` to `KerasTensor` representing that input.
for input_index, input_tensor in enumerate(
inbound_node.input_tensors
):
# `input_tensor` is a `KerasTensor`.
# `input_history` is a `KerasHistory`.
input_history = input_tensor._keras_history
if input_history.operation is None:
# Operation is `None` for `Input` tensors.
continue
# Go from input `KerasTensor` to the `Operation` that produced
# it as an output.
input_node = input_history.operation._inbound_nodes[
input_history.node_index
]
output_index = input_history.tensor_index
# Tentative source and destination of the edge.
source = input_node.operation
destination = layer
if not expand_nested:
# No nesting, connect directly.
add_edge(dot, source, layer)
continue
# ==== Potentially nested models case ====
# ---- Resolve the source of the edge ----
while isinstance(
source,
(functional.Functional, sequential.Sequential),
):
# When `source` is a `Functional` or `Sequential` model, we
# need to connect to the correct box within that model.
# Functional and sequential models do not have explicit
# "output" boxes, so we need to find the correct layer that
# produces the output we're connecting to, which can be
# nested several levels deep in sub-models. Hence the while
# loop to continue going into nested models until we
# encounter a real layer that's not a `Functional` or
# `Sequential`.
source, _, output_index = source.outputs[
output_index
]._keras_history
# ---- Resolve the destination of the edge ----
while isinstance(
destination,
(functional.Functional, sequential.Sequential),
):
if isinstance(destination, functional.Functional):
# When `destination` is a `Functional`, we point to the
# specific `InputLayer` in the model.
destination = destination.inputs[
input_index
]._keras_history.operation
else:
# When `destination` is a `Sequential`, there is no
# explicit "input" box, so we want to point to the first
# box in the model, but it may itself be another model.
# Hence the while loop to continue going into nested
# models until we encounter a real layer that's not a
# `Functional` or `Sequential`.
destination = destination.layers[0]
add_edge(dot, source, destination)
return dot
@keras_export("keras.utils.plot_model")
def plot_model(
model,
to_file="model.png",
show_shapes=False,
show_dtype=False,
show_layer_names=False,
rankdir="TB",
expand_nested=False,
dpi=200,
show_layer_activations=False,
show_trainable=False,
**kwargs,
):
"""Converts a Keras model to dot format and save to a file.
Example:
```python
inputs = ...
outputs = ...
model = keras.Model(inputs=inputs, outputs=outputs)
dot_img_file = '/tmp/model_1.png'
keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
```
Args:
model: A Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: `"TB"`
creates a vertical plot; `"LR"` creates a horizontal plot.
expand_nested: whether to expand nested Functional models
into clusters.
dpi: Image resolution in dots per inch.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
if not check_pydot():
message = (
"You must install pydot (`pip install pydot`) "
"for `plot_model` to work."
)
if "IPython.core.magics.namespace" in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
io_utils.print_msg(message)
return
else:
raise ImportError(message)
if not check_graphviz():
message = (
"You must install graphviz "
"(see instructions at https://graphviz.gitlab.io/download/) "
"for `plot_model` to work."
)
if "IPython.core.magics.namespace" in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
io_utils.print_msg(message)
return
else:
raise ImportError(message)
if kwargs.pop("layer_range", None) is not None:
raise ValueError("Argument `layer_range` is no longer supported.")
if kwargs:
raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
to_file = str(to_file)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = "png"
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
if extension != "pdf":
try:
from IPython import display
return display.Image(filename=to_file)
except ImportError:
pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/naming_test.py | keras/src/utils/naming_test.py | from keras.src.testing import test_case
from keras.src.utils import naming
class NamingUtilsTest(test_case.TestCase):
def test_uniquify_unique_name(self):
name = "the_unique_name"
unique_name = naming.uniquify(name)
self.assertEqual(unique_name, name)
def test_auto_name(self):
self.assertEqual(naming.auto_name("unique_name"), "unique_name")
self.assertEqual(naming.auto_name("unique_name"), "unique_name_1")
self.assertEqual(naming.auto_name("unique_name"), "unique_name_2")
def test_get_uid(self):
self.assertEqual(naming.get_uid("very_unique_name"), 1)
self.assertEqual(naming.get_uid("very_unique_name"), 2)
self.assertEqual(naming.get_uid("very_unique_name"), 3)
def test_uniquify_non_unique_name(self):
name = "non_unique_name"
naming.uniquify(name)
unique_name = naming.uniquify(name)
self.assertEqual(unique_name, f"{name}_1")
def test_to_snake_case_snake_case_name(self):
name = "snake_case_name"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, name)
def test_get_uid_existing_prefix(self):
prefix = "existing_prefix"
naming.get_uid(prefix)
uid = naming.get_uid(prefix)
self.assertEqual(uid, 2)
def test_reset_uids(self):
naming.get_uid("unique_name")
naming.reset_uids()
uid = naming.get_uid("unique_name")
self.assertEqual(uid, 1)
def test_get_object_name_no_name_attribute(self):
class ObjectWithoutName:
__name__ = "ObjectWithoutName"
obj = ObjectWithoutName()
object_name = naming.get_object_name(obj)
self.assertEqual(object_name, "object_without_name")
def test_get_object_name_no_name_or_class_attribute(self):
class ObjectWithoutNameOrClass:
pass
obj = ObjectWithoutNameOrClass()
object_name = naming.get_object_name(obj)
self.assertEqual(object_name, "object_without_name_or_class")
def test_uniquify_already_uniquified_name(self):
name = "unique_name"
unique_name = naming.uniquify(name)
new_unique_name = naming.uniquify(unique_name)
# first time `name` is uniquified so returns same name
self.assertEqual(name, unique_name)
# second time `name` is uniquified should be different
# from the first output
self.assertNotEqual(new_unique_name, unique_name)
def test_to_snake_case_capital_after_any_character(self):
name = "myVariableNameHere"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, "my_variable_name_here")
def test_to_snake_case_lower_before_upper(self):
name = "convertTHIS"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, "convert_this")
def test_to_snake_case_already_snake_cased(self):
name = "already_snake_cased"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, name)
def test_to_snake_case_no_changes(self):
name = "lowercase"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, name)
def test_to_snake_case_single_uppercase_word(self):
name = "UPPERCASE"
snake_case_name = naming.to_snake_case(name)
self.assertEqual(snake_case_name, "uppercase")
def test_get_object_name_for_keras_objects(self):
class MockKerasObject:
name = "mock_object"
obj = MockKerasObject()
result = naming.get_object_name(obj)
self.assertEqual(
result, "mock_object", f"Expected 'mock_object' but got {result}"
)
# Test for function objects that have a `__name__` attribute.
def test_get_object_name_for_functions(self):
def mock_function():
pass
result = naming.get_object_name(mock_function)
# Assumes to_snake_case works correctly.
expected_name = naming.to_snake_case(mock_function.__name__)
self.assertEqual(
result,
expected_name,
f"Expected '{expected_name}' but got {result}",
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/python_utils_test.py | keras/src/utils/python_utils_test.py | import base64
import marshal
from keras.src import testing
from keras.src.utils import python_utils
class PythonUtilsTest(testing.TestCase):
def test_func_dump_and_load(self):
def my_function(x, y=1, **kwargs):
return x + y
serialized = python_utils.func_dump(my_function)
deserialized = python_utils.func_load(serialized)
self.assertEqual(deserialized(2, y=3), 5)
def test_removesuffix(self):
x = "model.keras"
self.assertEqual(python_utils.removesuffix(x, ".keras"), "model")
self.assertEqual(python_utils.removesuffix(x, "model"), x)
def test_removeprefix(self):
x = "model.keras"
self.assertEqual(python_utils.removeprefix(x, "model"), ".keras")
self.assertEqual(python_utils.removeprefix(x, ".keras"), x)
def test_func_load_defaults_as_tuple(self):
# Using tuple as a default argument
def dummy_function(x=(1, 2, 3)):
pass
serialized = python_utils.func_dump(dummy_function)
deserialized = python_utils.func_load(serialized)
# Ensure that the defaults are still a tuple
self.assertIsInstance(deserialized.__defaults__[0], tuple)
# Ensure that the tuple default remains unchanged
self.assertEqual(deserialized.__defaults__[0], (1, 2, 3))
def test_remove_long_seq_standard_case(self):
sequences = [[1], [2, 2], [3, 3, 3], [4, 4, 4, 4]]
labels = [1, 2, 3, 4]
new_sequences, new_labels = python_utils.remove_long_seq(
3, sequences, labels
)
self.assertEqual(new_sequences, [[1], [2, 2]])
self.assertEqual(new_labels, [1, 2])
def test_func_load_with_closure(self):
def outer_fn(x):
def inner_fn(y):
return x + y
return inner_fn
func_with_closure = outer_fn(10)
serialized = python_utils.func_dump(func_with_closure)
deserialized = python_utils.func_load(serialized)
self.assertEqual(deserialized(5), 15)
def test_func_load_closure_conversion(self):
def my_function_with_closure(x):
return x + y
y = 5
serialized = python_utils.func_dump(my_function_with_closure)
deserialized = python_utils.func_load(serialized)
self.assertEqual(deserialized(5), 10)
def test_ensure_value_to_cell(self):
value_to_test = "test_value"
def dummy_fn():
value_to_test
cell_value = dummy_fn.__closure__[0].cell_contents
self.assertEqual(value_to_test, cell_value)
def test_closure_processing(self):
def simple_function(x):
return x + 10
serialized = python_utils.func_dump(simple_function)
deserialized = python_utils.func_load(serialized)
self.assertEqual(deserialized(5), 15)
def test_func_load_valid_encoded_code(self):
def another_simple_function(x):
return x * 2
raw_data = marshal.dumps(another_simple_function.__code__)
valid_encoded_code = base64.b64encode(raw_data).decode("utf-8")
try:
python_utils.func_load(valid_encoded_code)
except (UnicodeEncodeError, ValueError):
self.fail("Expected no error for valid code, but got an error.")
def test_func_load_bad_encoded_code(self):
bad_encoded_code = "This isn't valid base64!"
with self.assertRaises(AttributeError):
python_utils.func_load(bad_encoded_code)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/dtype_utils_test.py | keras/src/utils/dtype_utils_test.py | from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.testing import test_case
from keras.src.utils import dtype_utils
class DtypeSizeTests(test_case.TestCase):
def test_bfloat16_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("bfloat16"), 16)
def test_float16_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float16"), 16)
def test_float32_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float32"), 32)
def test_int32_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("int32"), 32)
def test_float64_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float64"), 64)
def test_int64_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("int64"), 64)
def test_uint8_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("uint8"), 8)
def test_bool_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("bool"), 1)
def test_invalid_dtype_size(self):
with self.assertRaises(ValueError):
dtype_utils.dtype_size("unknown_dtype")
class IsFloatTests(test_case.TestCase):
def test_is_float_float16(self):
self.assertTrue(dtype_utils.is_float("float16"))
def test_is_float_float32(self):
self.assertTrue(dtype_utils.is_float("float32"))
def test_is_float_float64(self):
self.assertTrue(dtype_utils.is_float("float64"))
def test_is_float_int32(self):
self.assertFalse(dtype_utils.is_float("int32"))
def test_is_float_bool(self):
self.assertFalse(dtype_utils.is_float("bool"))
def test_is_float_uint8(self):
self.assertFalse(dtype_utils.is_float("uint8"))
def test_is_float_containing_float(self):
self.assertTrue(dtype_utils.is_float("floating"))
def test_is_float_empty_string(self):
self.assertFalse(dtype_utils.is_float(""))
class CastToCommonDtype(test_case.TestCase):
def test_cast_to_common_dtype_float32_float64(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="float64")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float64")
def test_cast_to_common_dtype_float16_float32_float64(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="float32")
tensor3 = KerasTensor([7, 8, 9], dtype="float64")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float64")
def test_cast_to_common_dtype_float16_int16_float32(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="int16")
tensor3 = KerasTensor([7, 8, 9], dtype="float32")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_all_float32(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="float32")
tensor3 = KerasTensor([7, 8, 9], dtype="float32")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_float16_bfloat16(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="bfloat16")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float16")
def test_cast_to_common_dtype_float16_uint8(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="uint8")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float16")
def test_cast_to_common_dtype_mixed_types(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="int32")
tensor3 = KerasTensor([7, 8, 9], dtype="bool")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_no_float(self):
tensor1 = KerasTensor([1, 2, 3], dtype="int32")
tensor2 = KerasTensor([4, 5, 6], dtype="uint8")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
self.assertEqual(casted_tensors[0].dtype, "int32")
self.assertEqual(casted_tensors[1].dtype, "uint8")
def test_cast_to_common_dtype_float16_bfloat16_promotion(self):
tensor1 = KerasTensor([4, 5, 6], dtype="bfloat16")
tensor2 = KerasTensor([1, 2, 3], dtype="float16")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
# TODO failed AssertionError: 'float16' != 'float32'
# The order of the tensors matters in the current logic
# of the cast_to_common_dtype function
# def test_cast_to_common_dtype_bfloat16_float16_promotion(self):
# tensor1 = KerasTensor([1, 2, 3], dtype="float16")
# tensor2 = KerasTensor([4, 5, 6], dtype="bfloat16")
# casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
# for tensor in casted_tensors:
# self.assertEqual(tensor.dtype, "float32")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/utils/rng_utils.py | keras/src/utils/rng_utils.py | import random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.random import seed_generator
from keras.src.utils.module_utils import tensorflow as tf
GLOBAL_RANDOM_SEED = "global_random_seed"
@keras_export("keras.utils.set_random_seed")
def set_random_seed(seed):
"""Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).
You can use this utility to make almost any Keras program fully
deterministic. Some limitations apply in cases where network communications
are involved (e.g. parameter server distribution), which creates additional
sources of randomness, or when certain non-deterministic cuDNN ops are
involved.
Calling this utility does the following:
```python
import random
random.seed(seed)
import numpy as np
np.random.seed(seed)
import tensorflow as tf # Only if TF is installed
tf.random.set_seed(seed)
import torch # Only if the backend is 'torch'
torch.manual_seed(seed)
```
Additionally, it resets the global Keras `SeedGenerator`, which is used by
`keras.random` functions when the `seed` is not provided.
Note that the TensorFlow seed is set even if you're not using TensorFlow
as your backend framework, since many workflows leverage `tf.data`
pipelines (which feature random shuffling). Likewise many workflows
might leverage NumPy APIs.
Arguments:
seed: Integer, the random seed to use.
"""
if not isinstance(seed, int):
raise ValueError(
"Expected `seed` argument to be an integer. "
f"Received: seed={seed} (of type {type(seed)})"
)
# Store seed in global state so we can query it if set.
global_state.set_global_attribute(GLOBAL_RANDOM_SEED, seed)
# Remove global SeedGenerator, it will be recreated from the seed.
global_state.set_global_attribute(
seed_generator.GLOBAL_SEED_GENERATOR, None
)
random.seed(seed)
np.random.seed(seed)
if tf.available:
tf.random.set_seed(seed)
if backend.backend() == "torch":
import torch
torch.manual_seed(seed)
def get_random_seed():
"""Returns the explicit integer random seed if set.
If the seed has been explicitly set via `set_random_seed`, then
returns the seed. Otherwise, returns `None`.
"""
return global_state.get_global_attribute(GLOBAL_RANDOM_SEED)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/backup_and_restore.py | keras/src/callbacks/backup_and_restore.py | import json
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.BackupAndRestore")
class BackupAndRestore(Callback):
"""Callback to back up and restore the training state.
`BackupAndRestore` callback is intended to recover training from an
interruption that has happened in the middle of a `Model.fit` execution, by
backing up the training states in a temporary checkpoint file, at the end of
each epoch. Each backup overwrites the previously written checkpoint file,
so at any given time there is at most one such checkpoint file for
backup/restoring purpose.
If training restarts before completion, the training state (which includes
the `Model` weights and epoch number) is restored to the most recently saved
state at the beginning of a new `Model.fit` run. At the completion of a
`Model.fit` run, the temporary checkpoint file is deleted.
Note that the user is responsible to bring jobs back after the interruption.
This callback is important for the backup and restore mechanism for fault
tolerance purpose, and the model to be restored from a previous checkpoint
is expected to be the same as the one used to back up. If user changes
arguments passed to compile or fit, the checkpoint saved for fault tolerance
can become invalid.
Example:
>>> class InterruptingCallback(keras.callbacks.Callback):
... def on_epoch_begin(self, epoch, logs=None):
... if epoch == 4:
... raise RuntimeError('Interrupting!')
>>> callback = keras.callbacks.BackupAndRestore(backup_dir="/tmp/backup")
>>> model = keras.models.Sequential([keras.layers.Dense(10)])
>>> model.compile(keras.optimizers.SGD(), loss='mse')
>>> model.build(input_shape=(None, 20))
>>> try:
... model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
... batch_size=1, callbacks=[callback, InterruptingCallback()],
... verbose=0)
... except:
... pass
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, batch_size=1, callbacks=[callback],
... verbose=0)
>>> # Only 6 more epochs are run, since first training got interrupted at
>>> # zero-indexed epoch 4, second training will continue from 4 to 9.
>>> len(history.history['loss'])
>>> 6
Args:
backup_dir: String, path of directory where to store the data
needed to restore the model. The directory
cannot be reused elsewhere to store other files, e.g. by the
`BackupAndRestore` callback of another training run,
or by another callback (e.g. `ModelCheckpoint`)
of the same training run.
save_freq: `"epoch"`, integer, or `False`. When set to `"epoch"`
the callback saves the checkpoint at the end of each epoch.
When set to an integer, the callback saves the checkpoint every
`save_freq` batches. Set `save_freq=False` only if using
preemption checkpointing (i.e. with `save_before_preemption=True`).
double_checkpoint: Boolean. If enabled, `BackupAndRestore` callback
will save 2 last training states (current and previous). After
interruption if current state can't be loaded due to IO error
(e.g. file corrupted) it will try to restore previous one. Such
behaviour will consume twice more space on disk, but increase fault
tolerance. Defaults to `False`.
delete_checkpoint: Boolean. This `BackupAndRestore`
callback works by saving a checkpoint to back up the training state.
If `delete_checkpoint=True`, the checkpoint will be deleted after
training is finished. Use `False` if you'd like to keep the checkpoint
for future usage. Defaults to `True`.
"""
def __init__(
self,
backup_dir,
save_freq="epoch",
double_checkpoint=False,
delete_checkpoint=True,
):
super().__init__()
self.save_freq = save_freq
self.double_checkpoint = double_checkpoint
self.delete_checkpoint = delete_checkpoint
self._batches_seen_since_last_saving = 0
self._last_batch_seen = 0
self._current_epoch = 0
if not backup_dir:
raise ValueError("Empty `backup_dir` argument passed")
self.backup_dir = backup_dir
self._weights_path = file_utils.join(backup_dir, "latest.weights.h5")
self._training_metadata_path = file_utils.join(
backup_dir, "training_metadata.json"
)
self._prev_weights_path = f"{self._weights_path}.bkp"
self._prev_training_metadata_path = (
f"{self._training_metadata_path}.bkp"
)
if save_freq != "epoch" and not isinstance(save_freq, int):
raise ValueError(
"Invalid value for argument `save_freq`. "
f"Received: save_freq={save_freq}. "
"Expected either 'epoch' or an integer value."
)
def on_train_begin(self, logs=None):
try:
self._load_model()
except OSError as e:
# Weights may be corrupted. Trying to load previous one.
if not file_utils.exists(self._prev_weights_path):
raise e
file_utils.copy(self._prev_weights_path, self._weights_path)
if file_utils.exists(self._prev_training_metadata_path):
file_utils.copy(
self._prev_training_metadata_path,
self._training_metadata_path,
)
elif file_utils.exists(self._training_metadata_path):
file_utils.remove(self._training_metadata_path)
self._load_model()
def _load_model(self):
"""Get training state from temporary file and restore it."""
if not self.model.built:
raise ValueError(
"To use the BackupAndRestore callback, "
"you model must be built before you call `fit()`. "
f"Model {self.model} is unbuilt. You can build it "
"beforehand by calling it on a batch of data."
)
if file_utils.exists(self._weights_path):
if (
self.model.optimizer is not None
and not self.model.optimizer.built
):
# Make sure optimizer weights exist before loading.
self.model.optimizer.build(self.model.trainable_variables)
self.model.load_weights(self._weights_path)
if file_utils.exists(self._training_metadata_path):
with file_utils.File(self._training_metadata_path, "r") as f:
training_metadata = json.loads(f.read())
epoch = training_metadata["epoch"]
self.model._initial_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self._current_epoch = epoch + 1
self._last_batch_seen = 0
if self.save_freq == "epoch":
self._save_model()
def on_train_batch_end(self, batch, logs=None):
if self._should_save_on_batch(batch):
self._save_model()
def _save_model(self):
"""Saves the model.
Args:
epoch: the epoch this iteration is in.
batch: the batch this iteration is in. `None` if the `save_freq`
is set to `"epoch"`.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
# Create host directory if it doesn't exist.
if not file_utils.exists(self.backup_dir):
file_utils.makedirs(self.backup_dir)
if self.double_checkpoint and file_utils.exists(self._weights_path):
file_utils.copy(self._weights_path, self._prev_weights_path)
if self.double_checkpoint and file_utils.exists(
self._training_metadata_path
):
file_utils.copy(
self._training_metadata_path, self._prev_training_metadata_path
)
self.model.save_weights(filepath=self._weights_path, overwrite=True)
with file_utils.File(self._training_metadata_path, "w") as f:
training_metadata = {
"epoch": self._current_epoch,
"batch": self._last_batch_seen,
}
f.write(json.dumps(training_metadata))
def _should_save_on_batch(self, batch):
"""Handles batch-level saving logic, supports steps_per_execution."""
if self.save_freq == "epoch":
return False
if batch <= self._last_batch_seen: # New epoch.
add_batches = batch + 1 # batches are zero-indexed.
else:
add_batches = batch - self._last_batch_seen
self._batches_seen_since_last_saving += add_batches
self._last_batch_seen = batch
if self._batches_seen_since_last_saving >= self.save_freq:
self._batches_seen_since_last_saving = 0
return True
return False
def on_train_end(self, logs=None):
if self.delete_checkpoint and file_utils.exists(self.backup_dir):
file_utils.rmtree(self.backup_dir)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/history.py | keras/src/callbacks/history.py | from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
@keras_export("keras.callbacks.History")
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit()` method of models.
Example:
>>> model = Sequential([layers.Dense(10)])
>>> model.compile(SGD(), loss='mse')
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, verbose=1)
>>> print(history.params)
{'verbose': 1, 'epochs': 10, 'steps': 1}
>>> # check the keys of history object
>>> print(history.history.keys())
dict_keys(['loss'])
"""
def __init__(self):
super().__init__()
self.history = {}
def on_train_begin(self, logs=None):
self.epoch = []
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
# Set the history attribute on the model after the epoch ends. This will
# make sure that the state which is set is the latest one.
self.model.history = self
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/backup_and_restore_test.py | keras/src/callbacks/backup_and_restore_test.py | import numpy as np
import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.utils import file_utils
class InterruptingCallback(callbacks.Callback):
    """A callback to intentionally interrupt training.

    Raises a `RuntimeError` after `steps_int` batches or after
    `epoch_int` epochs (whichever is configured) to simulate a crash
    in the middle of a `fit()` call.
    """

    def __init__(self, steps_int, epoch_int):
        self.batch_count = 0
        self.epoch_count = 0
        self.steps_int = steps_int
        self.epoch_int = epoch_int

    def on_epoch_end(self, epoch, log=None):
        self.epoch_count += 1
        if self.epoch_int is None:
            return
        if self.epoch_count == self.epoch_int:
            raise RuntimeError("EpochInterruption")

    def on_batch_end(self, batch, logs=None):
        self.batch_count += 1
        if self.steps_int is None:
            return
        if self.batch_count == self.steps_int:
            raise RuntimeError("StepsInterruption")
class CanaryLayer(layers.Layer):
    """Pass-through layer that counts its forward passes.

    The tests below read the persisted `counter` weight to verify how
    many batches a (possibly restored) model has actually trained on.
    """

    def __init__(self):
        super().__init__()
        # Non-trainable scalar bumped once per call(); being a weight, it is
        # saved/restored with the model, so it survives checkpoint round-trips.
        self.counter = self.add_weight(
            shape=(), initializer="zeros", dtype="float32", trainable=False
        )

    def call(self, x):
        self.counter.assign_add(1)
        return x
class BackupAndRestoreCallbackTest(testing.TestCase):
    """Tests for `keras.callbacks.BackupAndRestore`.

    All fitting uses 10 samples with batch_size=4, i.e. ceil(10/4) = 3
    batches per epoch; the CanaryLayer counter therefore advances by 3
    per completed epoch, which the assertions below rely on.
    """

    def make_model(self):
        """Build a small compiled model whose CanaryLayer counts batches."""
        model = Sequential(
            [
                layers.Input((3,)),
                CanaryLayer(),
                layers.Dense(1),
            ]
        )
        model.compile(
            loss="mse",
            optimizer="sgd",
            metrics=["mse"],
        )
        return model

    # Check invalid save_freq, both string and non integer
    def test_save_freq_unknown_error(self):
        with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"):
            callbacks.BackupAndRestore(
                backup_dir="backup_dir", save_freq="batch"
            )
        with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"):
            callbacks.BackupAndRestore(backup_dir="backup_dir", save_freq=0.15)

    # Checking if after interruption, correct model params and
    # weights are loaded in step-wise backup
    @pytest.mark.requires_trainable_backend
    def test_best_case_step(self):
        temp_dir = self.get_temp_dir()
        backup_dir = file_utils.join(temp_dir, "subdir")
        self.assertFalse(file_utils.exists(backup_dir))
        model = self.make_model()
        cbk = callbacks.BackupAndRestore(backup_dir, save_freq=1)
        x_train = np.random.random((10, 3))
        y_train = np.random.random((10, 1))
        try:
            # InterruptingCallback raises after the 2nd batch of epoch 0.
            model.fit(
                x_train,
                y_train,
                batch_size=4,
                callbacks=[
                    cbk,
                    InterruptingCallback(steps_int=2, epoch_int=None),
                ],
                epochs=2,
                verbose=0,
            )
        except RuntimeError:
            self.assertTrue(file_utils.exists(backup_dir))
            self.assertEqual(cbk._current_epoch, 0)
            self.assertEqual(cbk._last_batch_seen, 1)
            # 2 batches ran before the interruption.
            self.assertEqual(int(model.layers[0].counter.value), 2)
            hist = model.fit(
                x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5
            )
            self.assertEqual(cbk._current_epoch, 5)
            self.assertEqual(hist.epoch[-1], 4)
            # 2 interrupted batches + 5 epochs * 3 batches = 17.
            self.assertEqual(int(model.layers[0].counter.value), 17)

    # Checking if after interruption, correct model params and
    # weights are loaded in epoch-wise backup
    @pytest.mark.requires_trainable_backend
    def test_best_case_epoch(self):
        temp_dir = self.get_temp_dir()
        backup_dir = file_utils.join(temp_dir, "subdir")
        self.assertFalse(file_utils.exists(backup_dir))
        model = self.make_model()
        self.assertEqual(int(model.layers[0].counter.value), 0)
        cbk = callbacks.BackupAndRestore(
            backup_dir=backup_dir, save_freq="epoch"
        )
        x_train = np.random.random((10, 3))
        y_train = np.random.random((10, 1))
        try:
            # InterruptingCallback raises at the end of the 2nd epoch.
            model.fit(
                x_train,
                y_train,
                batch_size=4,
                callbacks=[
                    cbk,
                    InterruptingCallback(steps_int=None, epoch_int=2),
                ],
                epochs=6,
                verbose=0,
            )
        except RuntimeError:
            self.assertEqual(cbk._current_epoch, 2)
            self.assertTrue(file_utils.exists(backup_dir))
            # 2 completed epochs * 3 batches = 6 forward passes.
            self.assertEqual(int(model.layers[0].counter.value), 6)
            hist = model.fit(
                x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5
            )
            self.assertEqual(cbk._current_epoch, 5)
            self.assertEqual(hist.epoch[-1], 4)
            self.assertEqual(int(model.layers[0].counter.value), 5 * 3)

    # Checking if after interruption and weights corruption, previous model
    # params and weights are loaded
    @pytest.mark.requires_trainable_backend
    def test_backup_corrupted(self):
        temp_dir = self.get_temp_dir()
        backup_dir = file_utils.join(temp_dir, "subdir")
        self.assertFalse(file_utils.exists(backup_dir))
        model = self.make_model()
        self.assertEqual(int(model.layers[0].counter.value), 0)
        # double_checkpoint=True keeps a previous-generation backup too.
        cbk = callbacks.BackupAndRestore(
            backup_dir=backup_dir, save_freq="epoch", double_checkpoint=True
        )
        x_train = np.random.random((10, 3))
        y_train = np.random.random((10, 1))
        try:
            model.fit(
                x_train,
                y_train,
                batch_size=4,
                callbacks=[
                    cbk,
                    InterruptingCallback(steps_int=None, epoch_int=2),
                ],
                epochs=6,
                verbose=0,
            )
        except RuntimeError:
            self.assertEqual(cbk._current_epoch, 2)
            self.assertTrue(file_utils.exists(backup_dir))
            self.assertTrue(file_utils.exists(cbk._weights_path))
            self.assertTrue(file_utils.exists(cbk._training_metadata_path))
            self.assertTrue(file_utils.exists(cbk._prev_weights_path))
            self.assertTrue(file_utils.exists(cbk._prev_training_metadata_path))
            self.assertEqual(int(model.layers[0].counter.value), 6)
            # Corrupt the latest weights file; restore must fall back to
            # the previous checkpoint and training must still succeed.
            with file_utils.File(cbk._weights_path, "w") as f:
                f.write("0")
            hist = model.fit(
                x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5
            )
            self.assertEqual(cbk._current_epoch, 5)
            self.assertEqual(hist.epoch[-1], 4)
            self.assertEqual(int(model.layers[0].counter.value), 5 * 3)

    # Checking if after interruption, when model is deleted
    @pytest.mark.requires_trainable_backend
    def test_model_deleted_case_epoch(self):
        temp_dir = self.get_temp_dir()
        backup_dir = file_utils.join(temp_dir, "subdir")
        self.assertFalse(file_utils.exists(backup_dir))
        model = self.make_model()
        cbk = callbacks.BackupAndRestore(backup_dir, save_freq="epoch")
        x_train = np.random.random((10, 3))
        y_train = np.random.random((10, 1))
        model.fit(
            x_train,
            y_train,
            batch_size=4,
            callbacks=[cbk],
            epochs=2,
            verbose=0,
        )
        # Training ended cleanly, so the backup dir must have been deleted.
        self.assertFalse(file_utils.exists(backup_dir))

    def test_backup_dir_empty_error(self):
        with self.assertRaisesRegex(
            ValueError, expected_regex="Empty `backup_dir` argument passed"
        ):
            callbacks.BackupAndRestore(backup_dir="", save_freq="epoch")

    def test_backup_dir_none_error(self):
        with self.assertRaisesRegex(
            ValueError, expected_regex="Empty `backup_dir` argument passed"
        ):
            callbacks.BackupAndRestore(backup_dir=None, save_freq="epoch")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/terminate_on_nan.py | keras/src/callbacks/terminate_on_nan.py | import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import io_utils
@keras_export("keras.callbacks.TerminateOnNaN")
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered.
This callback monitors the loss value during training
and terminates training when a NaN or Inf loss is detected.
By default, training is stopped gracefully
by setting `model.stop_training = True`, which triggers all callback cleanup
methods including `on_train_end()`.
Alternatively, you can use `raise_error=True` to immediately raise a
RuntimeError when NaN/Inf is detected. This raise_error termination
prevents `on_train_end()` from being called on other callbacks, which
is useful for preserving backup states or preventing unintended cleanup
when training fails.
Args:
raise_error: Boolean, default False. If False, uses graceful stop via
`model.stop_training = True`. If True, immediately raises
RuntimeError on NaN/Inf loss, bypassing callback cleanup methods.
Example:
```
# Graceful termination (default)
callback = keras.callbacks.TerminateOnNaN()
model.fit(x, y, callbacks=[callback])
# raise_error termination (strict failure)
callback = keras.callbacks.TerminateOnNaN(raise_error=True)
model.fit(x, y, callbacks=[callback])
```
"""
def __init__(self, raise_error: bool = False):
super().__init__()
self.raise_error = raise_error
def on_batch_end(self, batch, logs=None):
"""Check for NaN/Inf loss at the end of each batch.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step()`.
Raises:
RuntimeError: If loss is NaN/Inf and raise_error=True.
"""
logs = logs or {}
loss = logs.get("loss")
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
if self.raise_error:
raise RuntimeError(
f"NaN or Inf loss encountered at batch {batch}. "
f"Loss value: {loss}. Terminating training immediately."
)
else:
io_utils.print_msg(
f"Batch {batch}: Invalid loss, terminating training"
)
self.model.stop_training = True
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/reduce_lr_on_plateau_test.py | keras/src/callbacks/reduce_lr_on_plateau_test.py | import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import optimizers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.testing import test_utils
from keras.src.utils import io_utils
from keras.src.utils import numerical_utils
class ReduceLROnPlateauTest(testing.TestCase):
    """Tests for `keras.callbacks.ReduceLROnPlateau`.

    The shared model starts with Adam at lr=0.1, so with factor=0.1 each
    reduction produces an exactly representable new rate (0.01, 0.001, ...)
    that the tests can assert on. A huge `min_delta` (e.g. 100) makes every
    epoch count as "no improvement", forcing reductions deterministically.
    """

    def setUp(self):
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=10,
            test_samples=10,
            input_shape=(3,),
            num_classes=2,
        )
        y_test = numerical_utils.to_categorical(y_test)
        y_train = numerical_utils.to_categorical(y_train)
        model = Sequential([layers.Dense(5), layers.Dense(2)])
        model.compile(
            loss="mse",
            optimizer=optimizers.Adam(0.1),
        )
        self.model = model
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test

    @pytest.mark.requires_trainable_backend
    def test_reduces_lr_with_model_fit(self):
        # patience=1 with an unreachable min_delta: one cut within 2 epochs,
        # 0.1 * 0.1 = 0.01.
        reduce_lr = callbacks.ReduceLROnPlateau(
            patience=1, factor=0.1, monitor="val_loss", min_delta=100
        )
        self.model.fit(
            self.x_train,
            self.y_train,
            validation_data=(self.x_test, self.y_test),
            callbacks=[reduce_lr],
            epochs=2,
        )
        self.assertEqual(self.model.optimizer.learning_rate.value, 0.01)

    @pytest.mark.requires_trainable_backend
    def test_throws_when_optimizer_has_schedule(self):
        reduce_lr = callbacks.ReduceLROnPlateau(
            patience=1, factor=0.1, monitor="val_loss", min_delta=100
        )
        # Recompile with a LearningRateSchedule: the callback cannot
        # overwrite a scheduled rate, so fit() must raise TypeError.
        self.model.compile(
            loss="mse",
            optimizer=optimizers.Adam(
                optimizers.schedules.PolynomialDecay(
                    initial_learning_rate=0.1, decay_steps=10
                )
            ),
        )
        with self.assertRaisesRegex(
            TypeError,
            "This optimizer was created with a `LearningRateSchedule`",
        ):
            self.model.fit(
                self.x_train,
                self.y_train,
                validation_data=(self.x_test, self.y_test),
                callbacks=[reduce_lr],
                epochs=2,
            )

    @pytest.mark.requires_trainable_backend
    def test_verbose_logging(self):
        reduce_lr = callbacks.ReduceLROnPlateau(
            patience=1, factor=0.1, monitor="val_loss", min_delta=100, verbose=1
        )
        # Route the verbose message through `logging` so assertLogs sees it.
        io_utils.disable_interactive_logging()
        io_utils.set_logging_verbosity("INFO")
        with self.assertLogs() as logs:
            self.model.fit(
                self.x_train,
                self.y_train,
                validation_data=(self.x_test, self.y_test),
                callbacks=[reduce_lr],
                epochs=2,
            )
        expected_log = "ReduceLROnPlateau reducing learning rate to 0.01"
        self.assertTrue(any(expected_log in log for log in logs.output))

    @pytest.mark.requires_trainable_backend
    def test_honors_min_lr(self):
        # Two cuts would give 0.001, but min_lr=0.005 is the floor.
        reduce_lr = callbacks.ReduceLROnPlateau(
            patience=1,
            factor=0.1,
            monitor="val_loss",
            min_delta=10,
            min_lr=0.005,
        )
        self.model.fit(
            self.x_train,
            self.y_train,
            validation_data=(self.x_test, self.y_test),
            callbacks=[reduce_lr],
            epochs=4,
        )
        self.assertEqual(self.model.optimizer.learning_rate.value, 0.005)

    @pytest.mark.requires_trainable_backend
    def test_cooldown(self):
        reduce_lr = callbacks.ReduceLROnPlateau(
            patience=1,
            factor=0.1,
            monitor="val_loss",
            min_delta=100,
            cooldown=2,
        )
        self.model.fit(
            self.x_train,
            self.y_train,
            validation_data=(self.x_test, self.y_test),
            callbacks=[reduce_lr],
            epochs=4,
        )
        # With a cooldown of 2 epochs, we should only reduce the LR every other
        # epoch, so after 4 epochs we will have reduced 2 times.
        self.assertAllClose(self.model.optimizer.learning_rate.value, 0.001)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/swap_ema_weights_test.py | keras/src/callbacks/swap_ema_weights_test.py | import os.path
import tempfile
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import callbacks
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import optimizers
from keras.src import saving
from keras.src import testing
from keras.src.models import Sequential
from keras.src.testing import test_utils
from keras.src.utils import numerical_utils
class SwapEMAWeightsTest(testing.TestCase):
    """Tests for `keras.callbacks.SwapEMAWeights`.

    The callback swaps model weights with their EMA counterparts around
    evaluation/saving; these tests compare validation metrics from fit()
    against post-hoc evaluate() results to detect whether the swap
    happened.
    """

    def setUp(self):
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=10,
            test_samples=10,
            input_shape=(3,),
            num_classes=2,
            random_seed=2023,
        )
        y_train = numerical_utils.to_categorical(y_train)
        self.x_train = x_train
        self.y_train = y_train

    def _get_compiled_model(
        self, use_ema=True, jit_compile=True, loss_scale=False
    ):
        """Build a one-layer model with an (optionally EMA) SGD optimizer."""
        optimizer = optimizers.SGD(use_ema=use_ema, ema_momentum=0.9)
        if loss_scale:
            optimizer = optimizers.LossScaleOptimizer(optimizer)
        model = Sequential(
            [layers.Dense(2, kernel_initializer="ones", use_bias=False)]
        )
        model.compile(
            optimizer=optimizer,
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            jit_compile=jit_compile,
        )
        return model

    @pytest.mark.requires_trainable_backend
    def test_swap_ema_weights_with_invalid_optimizer(self):
        # The callback requires an optimizer built with use_ema=True.
        model = self._get_compiled_model(use_ema=False)
        with self.assertRaisesRegex(
            ValueError,
            ("SwapEMAWeights must be used when `use_ema=True` is set"),
        ):
            model.fit(
                self.x_train,
                self.y_train,
                epochs=2,
                callbacks=[callbacks.SwapEMAWeights()],
                validation_data=(self.x_train, self.y_train),
            )

    @pytest.mark.requires_trainable_backend
    def test_swap_ema_weights(self):
        # not using SwapEMAWeights
        model = self._get_compiled_model()
        history = model.fit(
            self.x_train,
            self.y_train,
            epochs=2,
            validation_data=(self.x_train, self.y_train),
        )
        logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
        # final metric during fitting is different from the evaluation
        self.assertNotEqual(
            history.history["val_mean_squared_error"][-1],
            logs["mean_squared_error"],
        )
        # using SwapEMAWeights
        model = self._get_compiled_model()
        history = model.fit(
            self.x_train,
            self.y_train,
            epochs=2,
            callbacks=[callbacks.SwapEMAWeights()],
            validation_data=(self.x_train, self.y_train),
        )
        logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
        # final metric during fitting is same as the evaluation
        self.assertEqual(
            history.history["val_mean_squared_error"][-1],
            logs["mean_squared_error"],
        )

    @pytest.mark.requires_trainable_backend
    def test_swap_ema_weights_on_epoch(self):
        # using SwapEMAWeights together with ModelCheckpoint
        model = self._get_compiled_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            model.fit(
                self.x_train,
                self.y_train,
                epochs=2,
                callbacks=[
                    callbacks.SwapEMAWeights(swap_on_epoch=True),
                    callbacks.ModelCheckpoint(
                        os.path.join(temp_dir, "{epoch:1d}.keras")
                    ),
                ],
                validation_data=(self.x_train, self.y_train),
            )
            model2 = saving.load_model(os.path.join(temp_dir, "2.keras"))
        logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
        logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True)
        # saved checkpoint will be applied by EMA weights
        self.assertEqual(
            logs["mean_squared_error"],
            logs2["mean_squared_error"],
        )

    @pytest.mark.requires_trainable_backend
    def test_swap_ema_weights_with_loss_scale_optimizer(self):
        # Same contract must hold when SGD is wrapped in LossScaleOptimizer.
        model = self._get_compiled_model(loss_scale=True)
        history = model.fit(
            self.x_train,
            self.y_train,
            epochs=2,
            callbacks=[callbacks.SwapEMAWeights()],
            validation_data=(self.x_train, self.y_train),
        )
        logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
        # final metric during fitting is same as the evaluation
        self.assertEqual(
            history.history["val_mean_squared_error"][-1],
            logs["mean_squared_error"],
        )

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="The distribute test can only run with TF backend.",
    )
    def test_swap_ema_weights_with_tf_distribute(self):
        # Need at least 2 devices for distribution related tests.
        cpus = tf.config.list_physical_devices("CPU")
        context._reset_context()
        tf.config.set_logical_device_configuration(
            cpus[0],
            [
                tf.config.LogicalDeviceConfiguration(),
                tf.config.LogicalDeviceConfiguration(),
            ],
        )
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            # TODO: set jit_compile=True once the issue is resolved in
            # integration_tests/tf_distribute_training_test.py#L52
            model = self._get_compiled_model(jit_compile=False)
            with tempfile.TemporaryDirectory() as temp_dir:
                model.fit(
                    self.x_train,
                    self.y_train,
                    epochs=2,
                    callbacks=[
                        callbacks.SwapEMAWeights(swap_on_epoch=True),
                        callbacks.ModelCheckpoint(
                            os.path.join(
                                temp_dir, "distributed_{epoch:1d}.keras"
                            )
                        ),
                    ],
                    validation_data=(self.x_train, self.y_train),
                )
                model2 = saving.load_model(
                    os.path.join(temp_dir, "distributed_2.keras")
                )
        logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
        logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True)
        # saved checkpoint will be applied by EMA weights
        self.assertEqual(
            logs["mean_squared_error"],
            logs2["mean_squared_error"],
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/remote_monitor_test.py | keras/src/callbacks/remote_monitor_test.py | import warnings
from unittest import mock
import numpy as np
from conftest import skip_if_backend
from keras.src import backend
from keras.src import callbacks
from keras.src import layers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.utils import numerical_utils
try:
import requests
except ImportError:
requests = None
class TerminateOnNaNTest(testing.TestCase):
    """Tests for `keras.callbacks.RemoteMonitor`.

    NOTE(review): the class name looks like a copy-paste from
    terminate_on_nan_test.py — it exercises `RemoteMonitor`, not
    `TerminateOnNaN`; consider renaming it to `RemoteMonitorTest`.
    All tests are skipped when the optional `requests` package is
    missing.
    """

    def test_RemoteMonitor(self):
        if requests is None:
            self.skipTest("`requests` required to run this test")
        monitor = callbacks.RemoteMonitor()
        # This will raise a warning since the default address is unreachable:
        warning_msg = "Could not reach RemoteMonitor root server"
        with warnings.catch_warnings(record=True) as warning_logs:
            warnings.simplefilter("always")
            monitor.on_epoch_end(0, logs={"loss": 0.0})
            self.assertIn(warning_msg, str(warning_logs[-1].message))

    def test_RemoteMonitor_np_array(self):
        if requests is None:
            self.skipTest("`requests` required to run this test")
        with mock.patch("requests.post") as requests_post:
            monitor = callbacks.RemoteMonitor(send_as_json=True)
            a = np.arange(1)  # a 1 by 1 array
            logs = {"loss": 0.0, "val": a}
            monitor.on_epoch_end(0, logs=logs)
            # The numpy array must be converted to a JSON-serializable int.
            send = {"loss": 0.0, "epoch": 0, "val": 0}
            requests_post.assert_called_once_with(
                monitor.root + monitor.path, json=send, headers=monitor.headers
            )

    def test_RemoteMonitor_np_float32(self):
        if requests is None:
            self.skipTest("`requests` required to run this test")
        with mock.patch("requests.post") as requests_post:
            monitor = callbacks.RemoteMonitor(send_as_json=True)
            a = np.float32(1.0)  # a float32 generic type
            logs = {"loss": 0.0, "val": a}
            monitor.on_epoch_end(0, logs=logs)
            # np.float32 must be converted to a plain Python float.
            send = {"loss": 0.0, "epoch": 0, "val": 1.0}
            requests_post.assert_called_once_with(
                monitor.root + monitor.path, json=send, headers=monitor.headers
            )

    @skip_if_backend(
        "openvino", "openvino backend does not support `fit` method"
    )
    def test_RemoteMonitorWithJsonPayload(self):
        if requests is None:
            self.skipTest("`requests` required to run this test")
        if backend.backend() == "numpy":
            self.skipTest("Trainer not implemented from NumPy backend.")
        TRAIN_SAMPLES = 10
        TEST_SAMPLES = 10
        INPUT_DIM = 3
        NUM_CLASSES = 2
        BATCH_SIZE = 4
        np.random.seed(1337)
        x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
        y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
        x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
        y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
        y_test = numerical_utils.to_categorical(y_test)
        y_train = numerical_utils.to_categorical(y_train)
        model = Sequential([layers.Dense(NUM_CLASSES)])
        model.compile(loss="mean_squared_error", optimizer="sgd")
        with mock.patch("requests.post") as requests_post:
            monitor = callbacks.RemoteMonitor(send_as_json=True)
            hist = model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=[monitor],
                epochs=1,
            )
            # One epoch -> exactly one POST carrying that epoch's metrics.
            send = {
                "epoch": 0,
                "loss": hist.history["loss"][0],
                "val_loss": hist.history["val_loss"][0],
            }
            requests_post.assert_called_once_with(
                monitor.root + monitor.path, json=send, headers=monitor.headers
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/model_checkpoint.py | keras/src/callbacks/model_checkpoint.py | import os
import re
import warnings
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.callbacks.monitor_callback import MonitorCallback
from keras.src.utils import file_utils
from keras.src.utils import io_utils
@keras_export("keras.callbacks.ModelCheckpoint")
class ModelCheckpoint(MonitorCallback):
"""Callback to save the Keras model or model weights at some frequency.
`ModelCheckpoint` callback is used in conjunction with training using
`model.fit()` to save a model or weights (in a checkpoint file) at some
interval, so the model or weights can be loaded later to continue the
training from the state saved.
A few options this callback provides include:
- Whether to only keep the model that has achieved the "best performance" so
far, or whether to save the model at the end of every epoch regardless of
performance.
- Definition of "best"; which quantity to monitor and whether it should be
maximized or minimized.
- The frequency it should save at. Currently, the callback supports saving
at the end of every epoch, or after a fixed number of training batches.
- Whether only weights are saved, or the whole model is saved.
Example:
```python
model.compile(loss=..., optimizer=...,
metrics=['accuracy'])
EPOCHS = 10
checkpoint_filepath = '/tmp/ckpt/checkpoint.model.keras'
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
monitor='val_accuracy',
mode='max',
save_best_only=True)
# Model is saved at the end of every epoch, if it's the best seen so far.
model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
# The model (that are considered the best) can be loaded as -
keras.models.load_model(checkpoint_filepath)
# Alternatively, one could checkpoint just the model weights as -
checkpoint_filepath = '/tmp/ckpt/checkpoint.weights.h5'
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
# Model weights are saved at the end of every epoch, if it's the best seen
# so far.
model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
# The model weights (that are considered the best) can be loaded as -
model.load_weights(checkpoint_filepath)
```
Args:
filepath: string or `PathLike`, path to save the model file.
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and keys in `logs`
(passed in `on_epoch_end`).
The `filepath` name needs to end with `".weights.h5"` when
`save_weights_only=True` or should end with `".keras"` or `".h5"`
when checkpoint saving the whole model (default).
For example:
if `filepath` is `"{epoch:02d}-{val_loss:.2f}.keras"` or
"{epoch:02d}-{val_loss:.2f}.weights.h5"`, then the model
checkpoints will be saved with the epoch number and the validation
loss in the filename. The directory of the filepath
should not be reused by any other callbacks to avoid conflicts.
monitor: The metric name to monitor. Typically the metrics are set by
the `Model.compile` method. Note:
* Prefix the name with `"val_"` to monitor validation metrics.
* Use `"loss"` or `"val_loss"` to monitor the model's total loss.
* If you specify metrics as strings, like `"accuracy"`, pass the
same string (with or without the `"val_"` prefix).
* If you pass `metrics.Metric` objects, `monitor` should be set to
`metric.name`
* If you're not sure about the metric names you can check the
contents of the `history.history` dictionary returned by
`history = model.fit()`
* Multi-output models set additional prefixes on the metric names.
verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
displays messages when the callback takes an action.
save_best_only: if `save_best_only=True`, it only saves when the model
is considered the "best" and the latest best model according to the
quantity monitored will not be overwritten. If `filepath` doesn't
contain formatting options like `{epoch}` then `filepath` will be
overwritten by each new better model.
mode: one of {`"auto"`, `"min"`, `"max"`}. If `save_best_only=True`, the
decision to overwrite the current save file is made based on either
the maximization or the minimization of the monitored quantity.
For `val_acc`, this should be `"max"`, for `val_loss` this should be
`"min"`, etc. In `"auto"` mode, the direction is automatically
inferred from the name of the monitored quantity.
save_weights_only: if `True`, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model is
saved (`model.save(filepath)`).
save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. If the `Model` is
compiled with `steps_per_execution=N`, then the saving criteria will
be checked every Nth batch. Note that if the saving isn't aligned to
epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
        initial_value_threshold: Floating point initial "best" value of the
            metric to be monitored. Only applies if `save_best_only=True`.
            Only overwrites the model weights already saved if the
            performance of the current model is better than this value.
"""
def __init__(
self,
filepath,
monitor="val_loss",
verbose=0,
save_best_only=False,
save_weights_only=False,
mode="auto",
save_freq="epoch",
initial_value_threshold=None,
):
super().__init__(monitor, mode, initial_value_threshold)
self.verbose = verbose
self.filepath = file_utils.path_to_string(filepath)
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self._batches_seen_since_last_saving = 0
self._last_batch_seen = 0
if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
raise ValueError(
f"Unrecognized save_freq: {self.save_freq}. "
"Expected save_freq are 'epoch' or integer values"
)
if save_weights_only:
if not self.filepath.endswith(".weights.h5"):
raise ValueError(
"When using `save_weights_only=True` in `ModelCheckpoint`"
", the filepath provided must end in `.weights.h5` "
"(Keras weights format). Received: "
f"filepath={self.filepath}"
)
else:
if not any(
self.filepath.endswith(ext) for ext in (".keras", ".h5")
):
raise ValueError(
"The filepath provided must end in `.keras` "
"(Keras model format). Received: "
f"filepath={self.filepath}"
)
def on_train_batch_end(self, batch, logs=None):
if self._should_save_on_batch(batch):
self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
    def on_epoch_begin(self, epoch, logs=None):
        # Remember the running epoch so batch-triggered saves can report it
        # and substitute it into the `{epoch}` filepath placeholder.
        self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
if self.monitor_op is None:
# Delay setup until the model's metrics are all built
self._set_monitor_op()
if self.save_freq == "epoch":
self._save_model(epoch=epoch, batch=None, logs=logs)
def _should_save_on_batch(self, batch):
"""Handles batch-level saving logic, supports steps_per_execution."""
if self.save_freq == "epoch":
return False
if batch <= self._last_batch_seen: # New epoch.
add_batches = batch + 1 # batches are zero-indexed.
else:
add_batches = batch - self._last_batch_seen
self._batches_seen_since_last_saving += add_batches
self._last_batch_seen = batch
if self._batches_seen_since_last_saving >= self.save_freq:
self._batches_seen_since_last_saving = 0
return True
return False
    def _should_save_model(self, epoch, batch, logs, filepath):
        """Determines whether the model should be saved.

        The model should be saved in the following cases:

        - self.save_best_only is False
        - self.save_best_only is True and `monitor` is a numpy array or
          backend tensor (falls back to `save_best_only=False`)
        - self.save_best_only is True and `self.monitor_op(current, self.best)`
          evaluates to True.

        Args:
            epoch: the epoch this iteration is in.
            batch: the batch this iteration is in. `None` if the `save_freq`
                is set to `"epoch"`.
            logs: the `logs` dict passed in to `on_batch_end` or
                `on_epoch_end`.
            filepath: the path where the model would be saved

        Returns:
            bool, whether to save. As a side effect, updates `self.best`
            when the monitored metric improved.
        """
        logs = logs or {}
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                # Monitored metric missing: warn but save anyway rather than
                # silently dropping checkpoints.
                warnings.warn(
                    f"Can save best model only with {self.monitor} available.",
                    stacklevel=2,
                )
                return True
            elif (
                isinstance(current, np.ndarray) or backend.is_tensor(current)
            ) and len(current.shape) > 0:
                # Non-scalar metric cannot be compared against `self.best`.
                warnings.warn(
                    "Can save best model only when `monitor` is "
                    f"a scalar value. Received: {current}. "
                    "Falling back to `save_best_only=False`."
                )
                return True
            else:
                # `self.best` may still be None before the first comparison.
                best_str = "None" if self.best is None else f"{self.best:.5f}"
                if self._is_improvement(current, self.best):
                    if self.verbose > 0:
                        io_utils.print_msg(
                            f"\nEpoch {epoch + 1}: {self.monitor} "
                            f"improved from {best_str} to {current:.5f}, "
                            f"saving model to {filepath}"
                        )
                    self.best = current
                    return True
                else:
                    if self.verbose > 0:
                        io_utils.print_msg(
                            f"\nEpoch {epoch + 1}: "
                            f"{self.monitor} did not improve from {best_str}"
                        )
                    return False
        else:
            if self.verbose > 0:
                io_utils.print_msg(
                    f"\nEpoch {epoch + 1}: saving model to {filepath}"
                )
            return True
def _save_model(self, epoch, batch, logs):
"""Saves the model.
Args:
epoch: the epoch this iteration is in.
batch: the batch this iteration is in. `None` if the `save_freq`
is set to `"epoch"`.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
filepath = self._get_file_path(epoch, batch, logs)
try:
if self._should_save_model(epoch, batch, logs, filepath):
# Create host directory if it doesn't exist.
dirname = os.path.dirname(filepath)
if dirname and not file_utils.exists(dirname):
file_utils.makedirs(dirname)
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: "
f"finished saving model to {filepath}"
)
except IsADirectoryError: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: {filepath}"
)
except IOError as e: # h5py 2.x
# `e.errno` appears to be `None` so checking the content of
# `e.args[0]`.
if "is a directory" in str(e.args[0]).lower():
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: f{filepath}"
)
# Re-throw the error for any other causes.
raise e
def _get_file_path(self, epoch, batch, logs):
"""Returns the file path for checkpoint."""
try:
# `filepath` may contain placeholders such as
# `{epoch:02d}`,`{batch:02d}` and `{mape:.2f}`. A mismatch between
# logged metrics and the path's placeholders can cause formatting to
# fail.
if batch is None or "batch" in logs:
file_path = self.filepath.format(epoch=epoch + 1, **logs)
else:
file_path = self.filepath.format(
epoch=epoch + 1, batch=batch + 1, **logs
)
except KeyError as e:
raise KeyError(
f'Failed to format this callback filepath: "{self.filepath}". '
f"Reason: {e}"
)
return file_path
def _checkpoint_exists(self, filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
return file_utils.exists(filepath)
    def _get_most_recently_modified_file_matching_pattern(self, pattern):
        """Returns the most recently modified filepath matching pattern.

        In the rare case where there are more than one pattern-matching file
        having the same modified time that is most recent among all, return
        the filepath that is largest (by `>` operator, lexicographically
        using the numeric equivalents). This provides a tie-breaker when
        multiple files are most recent. Note that a larger `filepath` can
        sometimes indicate a later time of modification (for instance, when
        epoch/batch is used as formatting option), but not necessarily (when
        accuracy or loss is used). The tie-breaker is put in the logic as
        best effort to return the most recent, and to avoid nondeterministic
        result.

        Modified time of a file is obtained with `os.path.getmtime()`.

        This utility function is best demonstrated via an example:

        ```python
        file_pattern = 'batch{batch:02d}epoch{epoch:02d}.keras'
        test_dir = self.get_temp_dir()
        path_pattern = os.path.join(test_dir, file_pattern)
        file_paths = [
            os.path.join(test_dir, file_name) for file_name in
            ['batch03epoch02.keras',
             'batch02epoch02.keras', 'batch01epoch01.keras']
        ]
        for file_path in file_paths:
            # Write something to each of the files
            ...
        self.assertEqual(
            _get_most_recently_modified_file_matching_pattern(path_pattern),
            file_paths[-1])
        ```

        Args:
            pattern: The file pattern that may optionally contain python
                placeholder such as `{epoch:02d}`.

        Returns:
            The most recently modified file's full filepath matching
            `pattern`. If `pattern` does not contain any placeholder, this
            returns the filepath that exactly matches `pattern`. Returns
            `None` if no match is found.
        """
        dir_name = os.path.dirname(pattern)
        base_name = os.path.basename(pattern)
        # Turn `{...}` placeholders into `.*` wildcards so that file names
        # produced by any formatted metric values still match.
        base_name_regex = f"^{re.sub(r'{.*}', r'.*', base_name)}$"
        latest_mod_time = 0
        file_path_with_latest_mod_time = None
        # How many files share the latest modified time; used to decide
        # whether the lexicographic tie-breaker is needed.
        n_file_with_latest_mod_time = 0
        file_path_with_largest_file_name = None
        if file_utils.exists(dir_name):
            for file_name in os.listdir(dir_name):
                # Only consider if `file_name` matches the pattern.
                if re.match(base_name_regex, file_name):
                    file_path = os.path.join(dir_name, file_name)
                    mod_time = os.path.getmtime(file_path)
                    if (
                        file_path_with_largest_file_name is None
                        or file_path > file_path_with_largest_file_name
                    ):
                        file_path_with_largest_file_name = file_path
                    if mod_time > latest_mod_time:
                        latest_mod_time = mod_time
                        file_path_with_latest_mod_time = file_path
                        # In the case a file with later modified time is found,
                        # reset the counter for the number of files with latest
                        # modified time.
                        n_file_with_latest_mod_time = 1
                    elif mod_time == latest_mod_time:
                        # In the case a file has modified time tied with the
                        # most recent, increment the counter for the number of
                        # files with latest modified time by 1.
                        n_file_with_latest_mod_time += 1
        if n_file_with_latest_mod_time == 1:
            # Return the sole file that has most recent modified time.
            return file_path_with_latest_mod_time
        else:
            # If there are more than one file having latest modified time,
            # return the file path with the largest file name.
            return file_path_with_largest_file_name
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/terminate_on_nan_test.py | keras/src/callbacks/terminate_on_nan_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import callbacks
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.callbacks import BackupAndRestore
from keras.src.callbacks import TerminateOnNaN
from keras.src.models import Sequential
from keras.src.utils import numerical_utils
@pytest.mark.requires_trainable_backend
class TerminateOnNaNTest(testing.TestCase):
    """Test suite for TerminateOnNaN callback."""

    def test_TerminateOnNaN(self):
        """Default TerminateOnNaN stops training after the first NaN/Inf
        loss.
        """
        TRAIN_SAMPLES = 10
        TEST_SAMPLES = 10
        INPUT_DIM = 3
        NUM_CLASSES = 2
        BATCH_SIZE = 4
        np.random.seed(1337)
        x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
        y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
        x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
        y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
        y_test = numerical_utils.to_categorical(y_test)
        y_train = numerical_utils.to_categorical(y_train)
        model = Sequential()
        # A huge constant initializer (1e5) across 5 stacked layers drives
        # activations to overflow, producing a NaN/Inf loss immediately.
        initializer = initializers.Constant(value=1e5)
        for _ in range(5):
            model.add(
                layers.Dense(
                    2,
                    activation="relu",
                    kernel_initializer=initializer,
                )
            )
        model.add(layers.Dense(NUM_CLASSES))
        model.compile(loss="mean_squared_error", optimizer="sgd")
        history = model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=[callbacks.TerminateOnNaN()],
            epochs=20,
        )
        # Only one epoch should have run, and its loss must be NaN/Inf.
        loss = history.history["loss"]
        self.assertEqual(len(loss), 1)
        self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))

    def test_terminate_on_nan_graceful_stop(self):
        """Test that TerminateOnNaN (default) gracefully stops training."""
        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        x = np.array([[1.0], [2.0]])
        y = np.array([[np.inf], [np.inf]])
        callback = TerminateOnNaN(raise_error=False)
        # Training should complete without raising RuntimeError
        history = model.fit(
            x, y, epochs=2, batch_size=1, callbacks=[callback], verbose=0
        )
        # Training should stop early
        self.assertLess(len(history.history["loss"]), 4)

    def test_terminate_on_nan_raise_error_raises_error(self):
        """Test that TerminateOnNaN(raise_error=True) raises
        RuntimeError on NaN loss.
        """
        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        x = np.array([[1.0], [2.0]])
        y = np.array([[np.inf], [np.inf]])
        callback = TerminateOnNaN(raise_error=True)
        # Training should raise RuntimeError
        with self.assertRaisesRegex(
            RuntimeError,
            "NaN or Inf loss encountered",
        ):
            model.fit(
                x, y, epochs=1, batch_size=1, callbacks=[callback], verbose=0
            )

    def test_raise_error_terminate_does_not_trigger_on_train_end(self):
        """Test that on_train_end is NOT called when
        TerminateOnNaN(raise_error=True) raises.
        """

        class TrackingCallback(callbacks.Callback):
            # Records whether `on_train_end` fired.
            def __init__(self):
                super().__init__()
                self.train_end_called = False

            def on_train_end(self, logs=None):
                self.train_end_called = True

        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        x = np.array([[1.0]])
        y = np.array([[np.inf]])
        tracking_callback = TrackingCallback()
        raise_error_terminate_callback = TerminateOnNaN(raise_error=True)
        # Should raise RuntimeError
        with self.assertRaises(RuntimeError):
            model.fit(
                x,
                y,
                epochs=1,
                callbacks=[tracking_callback, raise_error_terminate_callback],
                verbose=0,
            )
        # on_train_end should NOT have been called
        self.assertFalse(tracking_callback.train_end_called)

    def test_raise_error_terminate_preserves_backup(self):
        """Ensure BackupAndRestore directory is preserved when
        TerminateOnNaN(raise_error=True) triggers.
        """
        tmpdir = self.get_temp_dir()
        backup_dir = os.path.join(tmpdir, "backups")
        os.makedirs(backup_dir, exist_ok=True)
        # Plant a sentinel file so we can verify the directory was not
        # cleaned up after the error.
        fake_file = os.path.join(backup_dir, "checkpoint.txt")
        with open(fake_file, "w") as f:
            f.write("dummy checkpoint")
        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        x_nan = np.array([[1.0]])
        y_nan = np.array([[np.inf]])
        raise_error_terminate_callback = TerminateOnNaN(raise_error=True)
        backup_callback = BackupAndRestore(backup_dir=backup_dir)
        # Monkeypatch BackupAndRestore to prevent cleanup on train_end
        backup_callback.on_train_end = lambda logs=None: None
        # Training should raise RuntimeError
        with self.assertRaises(RuntimeError):
            model.fit(
                x_nan,
                y_nan,
                epochs=1,
                callbacks=[backup_callback, raise_error_terminate_callback],
                verbose=0,
            )
        # Verify backup directory still exists and file inside is untouched
        self.assertTrue(
            os.path.exists(backup_dir),
            f"Backup dir deleted: {backup_dir}",
        )
        self.assertTrue(
            os.path.exists(fake_file),
            "Backup file missing unexpectedly.",
        )

    @parameterized.named_parameters(
        ("raise_error_false", False),
        ("raise_error_true", True),
    )
    def test_normal_training_does_not_raise(self, raise_error):
        """Test that TerminateOnNaN does not raise on normal training."""
        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        x = np.array([[1.0], [2.0]])
        y = np.array([[1.0], [2.0]])
        callback = TerminateOnNaN(raise_error=raise_error)
        # Should complete without raising RuntimeError
        history = model.fit(x, y, epochs=2, callbacks=[callback], verbose=0)
        # Should have completed 2 epochs
        self.assertEqual(len(history.history["loss"]), 2)

    def test_raise_error_terminate_stops_on_later_batch(self):
        """Ensure TerminateOnNaN(raise_error=True) stops training
        if NaN appears in later batch.
        """
        model = models.Sequential([layers.Dense(1, input_shape=(1,))])
        model.compile(optimizer="sgd", loss="mse")
        # Batch 1: normal loss, Batch 2: NaN loss
        x = np.array([[1.0], [2.0]])
        y = np.array([[1.0], [np.inf]])  # NaN/Inf appears only in 2nd batch
        callback = TerminateOnNaN(raise_error=True)
        with self.assertRaises(RuntimeError) as exc:
            model.fit(
                x, y, epochs=1, batch_size=1, callbacks=[callback], verbose=0
            )
        # The error message should identify the offending batch index.
        self.assertTrue(any(f"batch {i}" in str(exc.exception) for i in [0, 1]))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/callback_list.py | keras/src/callbacks/callback_list.py | import concurrent.futures
from keras.src import backend
from keras.src import tree
from keras.src import utils
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.callbacks.history import History
from keras.src.callbacks.progbar_logger import ProgbarLogger
from keras.src.utils import python_utils
@keras_export("keras.callbacks.CallbackList")
class CallbackList(Callback):
    """Container abstracting a list of callbacks."""

    def __init__(
        self,
        callbacks=None,
        add_history=False,
        add_progbar=False,
        model=None,
        **params,
    ):
        """Container for `Callback` instances.

        This object wraps a list of `Callback` instances, making it possible
        to call them all at once via a single endpoint
        (e.g. `callback_list.on_epoch_end(...)`).

        Args:
            callbacks: List of `Callback` instances.
            add_history: Whether a `History` callback should be added, if one
                does not already exist in the `callbacks` list.
            add_progbar: Whether a `ProgbarLogger` callback should be added,
                if one does not already exist in the `callbacks` list.
            model: The `Model` these callbacks are used with.
            **params: If provided, parameters will be passed to each
                `Callback` via `Callback.set_params`.
        """
        self.callbacks = tree.flatten(callbacks) if callbacks else []
        # Nesting depth of train/test/predict begin/end blocks; the async
        # executor is created on the outermost `begin` and shut down when
        # the outermost `end` completes.
        self._in_begin_end_block_count = 0
        self._executor = None
        self._async_train = False
        self._async_test = False
        self._async_predict = False
        # Futures of callback invocations dispatched to the executor.
        self._futures = []
        self._configure_async_dispatch(callbacks)
        self._add_default_callbacks(add_history, add_progbar)
        self.set_model(model)
        self.set_params(params)

    def set_params(self, params):
        """Stores `params` and forwards them to every contained callback."""
        self.params = params
        if params:
            for callback in self.callbacks:
                callback.set_params(params)

    def _configure_async_dispatch(self, callbacks):
        """Decides whether batch-end hooks may be dispatched asynchronously.

        Async dispatch is only enabled when the backend is thread-safe and
        no callback overrides the relevant `on_*_batch_end` hook, unless the
        callback explicitly opts in via an `async_safe = True` attribute.
        """
        # Determine whether callbacks can be dispatched asynchronously.
        if not backend.IS_THREAD_SAFE:
            return
        async_train = True
        async_test = True
        async_predict = True
        if callbacks:
            if isinstance(callbacks, (list, tuple)):
                for cbk in callbacks:
                    if getattr(cbk, "async_safe", False):
                        # Callbacks that expose self.async_safe == True
                        # will be assumed safe for async dispatch.
                        continue
                    if not utils.is_default(cbk.on_batch_end):
                        async_train = False
                    if not utils.is_default(cbk.on_train_batch_end):
                        async_train = False
                    if not utils.is_default(cbk.on_test_batch_end):
                        async_test = False
                    if not utils.is_default(cbk.on_predict_batch_end):
                        async_predict = False
        self._async_train = async_train
        self._async_test = async_test
        self._async_predict = async_predict

    def _add_default_callbacks(self, add_history, add_progbar):
        """Adds `Callback`s that are always present."""
        self._progbar = None
        self._history = None
        for cb in self.callbacks:
            if isinstance(cb, ProgbarLogger):
                self._progbar = cb
            elif isinstance(cb, History):
                self._history = cb
        if self._history is None and add_history:
            self._history = History()
            self.callbacks.append(self._history)
        if self._progbar is None and add_progbar:
            self._progbar = ProgbarLogger()
            self.callbacks.append(self._progbar)

    def set_model(self, model):
        """Attaches `model` to this list and to every contained callback."""
        if not model:
            return
        super().set_model(model)
        if self._history:
            model.history = self._history
        for callback in self.callbacks:
            callback.set_model(model)

    def _on_begin(self):
        """Called by `on_train/test/predict_begin`.

        Start the executor for async calls if needed.
        """
        self._in_begin_end_block_count += 1
        if (
            self._in_begin_end_block_count == 1
            and (self._async_train or self._async_test or self._async_predict)
            and self._executor is None
        ):
            self._executor = concurrent.futures.ThreadPoolExecutor()

    def _on_end(self):
        """Called by `on_train/test/predict_end`.

        Shutdown the executor for async calls if all begin/end blocks
        completed.
        """
        self._in_begin_end_block_count -= 1
        if self._in_begin_end_block_count < 0:
            raise ValueError(
                "`on_xxx_end` called without corresponding `on_xxx_begin`"
            )
        if self._in_begin_end_block_count == 0 and self._executor is not None:
            self._executor.shutdown()
            self._executor = None

    def _async_dispatch(self, fn, *args):
        """Submits `fn(*args)` to the executor, reaping finished futures.

        Calling `result()` on a completed future re-raises any exception
        that was raised by the callback on its worker thread.
        """
        # Bug fix: the previous implementation removed elements from
        # `self._futures` while iterating over it; `list.remove` during
        # iteration skips the element that follows each removal, so some
        # completed futures were left unreaped and their errors surfaced
        # late. Rebuild the pending list instead.
        pending = []
        for future in self._futures:
            if future.done():
                future.result()
            else:
                pending.append(future)
        self._futures = pending
        self._futures.append(self._executor.submit(fn, *args))

    def _flush_futures(self):
        """Waits for all futures to complete and clears the list."""
        for future in self._futures:
            future.result()
        self._futures = []

    # `begin` hooks are always dispatched synchronously.

    def on_batch_begin(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_batch_begin(batch, logs=logs)

    def on_epoch_begin(self, epoch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)

    def on_epoch_end(self, epoch, logs=None):
        if self._async_train:
            # Ensure all pending batch-end work is done before epoch-end.
            self._flush_futures()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)

    def on_train_batch_begin(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_train_batch_begin(batch, logs=logs)

    def on_test_batch_begin(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_test_batch_begin(batch, logs=logs)

    def on_predict_batch_begin(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_batch_begin(batch, logs=logs)

    # `batch_end` hooks are dispatched asynchronously when allowed by
    # `_configure_async_dispatch`; otherwise they run inline.

    def on_batch_end(self, batch, logs=None):
        if self._async_train:
            self._async_dispatch(self._on_batch_end, batch, logs)
        else:
            self._on_batch_end(batch, logs)

    def on_train_batch_end(self, batch, logs=None):
        if self._async_train:
            self._async_dispatch(self._on_train_batch_end, batch, logs)
        else:
            self._on_train_batch_end(batch, logs)

    def on_test_batch_end(self, batch, logs=None):
        if self._async_test:
            self._async_dispatch(self._on_test_batch_end, batch, logs)
        else:
            self._on_test_batch_end(batch, logs)

    def on_predict_batch_end(self, batch, logs=None):
        if self._async_predict:
            self._async_dispatch(self._on_predict_batch_end, batch, logs)
        else:
            self._on_predict_batch_end(batch, logs)

    def _on_batch_end(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_batch_end(batch, logs=logs)

    def _on_train_batch_end(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_train_batch_end(batch, logs=logs)

    def _on_test_batch_end(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_test_batch_end(batch, logs=logs)

    def _on_predict_batch_end(self, batch, logs=None):
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_batch_end(batch, logs=logs)

    def on_train_begin(self, logs=None):
        self._on_begin()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_train_begin(logs)

    def on_train_end(self, logs=None):
        if self._async_train:
            self._flush_futures()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_train_end(logs)
        self._on_end()

    def on_test_begin(self, logs=None):
        self._on_begin()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_test_begin(logs)

    def on_test_end(self, logs=None):
        if self._async_test:
            self._flush_futures()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_test_end(logs)
        self._on_end()

    def on_predict_begin(self, logs=None):
        self._on_begin()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_begin(logs)

    def on_predict_end(self, logs=None):
        if self._async_predict:
            self._flush_futures()
        logs = python_utils.pythonify_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_end(logs)
        self._on_end()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/tensorboard_test.py | keras/src/callbacks/tensorboard_test.py | import collections
import os
import random
import sys
import numpy as np
import pytest
import tensorflow.summary as summary
from tensorflow.compat.v1 import SummaryMetadata
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from keras.src import backend
from keras.src import callbacks
from keras.src import layers
from keras.src import losses
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
from keras.src.optimizers import schedules
# Note: this file, and TensorBoard support in general, depends on tensorflow.
# A summary that was emitted during a test. Fields:
#   logdir: str. The logdir of the FileWriter to which the summary was
#     written.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple("_ObservedSummary", ("logdir", "tag"))
class _SummaryIterator:
    """Iterator over the `Event` protocol buffers stored in a record file.

    Wraps the raw TFRecord iterator for `path` and parses each serialized
    record into an `event_pb2.Event` message.
    """

    def __init__(self, path):
        self._records = tf_record.tf_record_iterator(path)

    def __iter__(self):
        return self

    def __next__(self):
        serialized = next(self._records)
        return event_pb2.Event.FromString(serialized)

    # Python 2-style alias, kept for compatibility with older callers.
    next = __next__
class _SummaryFile:
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
self.convert_from_v2_summary_proto = False
def list_summaries(logdir):
    """Read all summaries under the logdir into a `_SummaryFile`.

    Args:
        logdir: A path to a directory that contains zero or more event
            files, either as direct children or in transitive
            subdirectories. Summaries in these events must only contain
            old-style scalars, images, and histograms. Non-summary events,
            like `graph_def`s, are ignored.

    Returns:
        A `_SummaryFile` object reflecting all summaries written to any
        event files in the logdir or any of its descendant directories.

    Raises:
        ValueError: If an event file contains a summary of unexpected kind.
    """
    result = _SummaryFile()
    for dirpath, _, filenames in os.walk(logdir):
        for filename in filenames:
            # Only TensorBoard event files are of interest.
            if not filename.startswith("events.out."):
                continue
            path = os.path.join(dirpath, filename)
            for event in _SummaryIterator(path):
                if event.graph_def:
                    result.graph_defs.append(event.graph_def)
                if not event.summary:  # (e.g., it's a `graph_def` event)
                    continue
                for value in event.summary.value:
                    tag = value.tag
                    # Case on the `value` rather than the summary metadata
                    # because the Keras callback uses `summary_ops_v2` to emit
                    # old-style summaries. See b/124535134.
                    kind = value.WhichOneof("value")
                    container = {
                        "simple_value": result.scalars,
                        "image": result.images,
                        "histo": result.histograms,
                        "tensor": result.tensors,
                    }.get(kind)
                    if container is None:
                        raise ValueError(
                            "Unexpected summary kind %r in event file %s:\n%r"
                            % (kind, path, event)
                        )
                    elif kind == "tensor" and tag != "keras":
                        # Convert the tf2 summary proto to old style for type
                        # checking.
                        plugin_name = value.metadata.plugin_data.plugin_name
                        container = {
                            "images": result.images,
                            "histograms": result.histograms,
                            "scalars": result.scalars,
                        }.get(plugin_name)
                        if container is not None:
                            result.convert_from_v2_summary_proto = True
                        else:
                            # Unknown plugin: keep it in the generic bucket.
                            container = result.tensors
                    container.add(_ObservedSummary(logdir=dirpath, tag=tag))
    return result
class TestTensorBoardV2(testing.TestCase):
def _get_log_dirs(self):
logdir = os.path.join(
self.get_temp_dir(), str(random.randint(1, int(1e7))), "tb"
)
train_dir = os.path.join(logdir, "train")
validation_dir = os.path.join(logdir, "validation")
return logdir, train_dir, validation_dir
def _get_model(self, compile_model=True):
model = models.Sequential(
[
layers.Input((10, 10, 1)),
layers.Flatten(),
layers.Dense(1),
]
)
if compile_model:
model.compile("sgd", "mse")
return model
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_basic(self):
        """Smoke test: default TensorBoard writes epoch-level scalars."""
        model = self._get_model()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir)
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            validation_data=(x, y),
            callbacks=[tb_cbk],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
            },
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_across_invocations(self):
        """Regression test for summary writer resource use-after-free."""
        model = self._get_model()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir)
        # Reuse the same callback instance across two `fit` calls.
        for _ in (1, 2):
            model.fit(
                x,
                y,
                batch_size=2,
                epochs=2,
                validation_data=(x, y),
                callbacks=[tb_cbk],
            )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
            },
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_no_spurious_event_files(self):
        """Event files must only be written under the `train` run dir."""
        model = self._get_model()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        logdir, train_dir, _ = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir)
        model.fit(x, y, batch_size=2, epochs=2, callbacks=[tb_cbk])
        # Collect the basename of every directory that holds event files.
        events_file_run_basenames = set()
        for dirpath, _, filenames in os.walk(train_dir):
            if any(fn.startswith("events.out.") for fn in filenames):
                events_file_run_basenames.add(os.path.basename(dirpath))
        self.assertEqual(events_file_run_basenames, {"train"})
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_batch_metrics(self):
        """`update_freq=1` adds per-batch loss scalars on top of epoch
        scalars.
        """
        model = self._get_model()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir, update_freq=1)
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            validation_data=(x, y),
            callbacks=[tb_cbk],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="batch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
            },
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_learning_rate_schedules(self):
        """A schedule-driven learning rate is still logged per epoch."""
        model = self._get_model(compile_model=False)
        opt = optimizers.SGD(schedules.CosineDecay(0.01, 1))
        model.compile(opt, "mse")
        logdir, train_dir, _ = self._get_log_dirs()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            callbacks=[callbacks.TensorBoard(logdir)],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
            },
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_global_step(self):
        """`write_steps_per_second=True` emits batch- and epoch-level
        steps-per-second scalars.
        """
        model = self._get_model(compile_model=False)
        opt = optimizers.SGD(schedules.CosineDecay(0.01, 1))
        model.compile(opt, "mse")
        logdir, train_dir, _ = self._get_log_dirs()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            verbose=0,
            callbacks=[
                callbacks.TensorBoard(
                    logdir,
                    update_freq=1,
                    profile_batch=0,
                    write_steps_per_second=True,
                )
            ],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="batch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=train_dir, tag="epoch_steps_per_second"
                ),
                _ObservedSummary(
                    logdir=train_dir, tag="batch_steps_per_second"
                ),
            },
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_weight_histograms(self):
        """`histogram_freq=1` emits weight histograms every epoch."""
        model = self._get_model()
        x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir, histogram_freq=1)
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            validation_data=(x, y),
            callbacks=[tb_cbk],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
            },
        )
        # Layer prefixes vary by layer naming, so compare modulo prefix.
        self.assertEqual(
            self._strip_layer_names(summary_file.histograms, "sequential"),
            {_ObservedSummary(logdir=train_dir, tag="histogram")},
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_weight_images(self):
        """`write_images=True` emits kernel/bias image summaries."""
        # Conv input layout depends on the backend's image_data_format.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (10, 10, 1)
            x_shape = (10, 10, 10, 1)
        else:
            input_shape = (1, 10, 10)
            x_shape = (10, 1, 10, 10)
        x, y = np.ones(x_shape), np.ones((10, 1))
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(
            logdir, histogram_freq=1, write_images=True
        )
        model_type = "sequential"
        model = models.Sequential(
            [
                layers.Input(input_shape),
                layers.Conv2D(3, 10),
                layers.GlobalAveragePooling2D(),
                layers.Dense(1),
            ]
        )
        model.compile("sgd", "mse")
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            validation_data=(x, y),
            callbacks=[tb_cbk],
        )
        summary_file = list_summaries(logdir)
        self.assertEqual(
            summary_file.scalars,
            {
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
            },
        )
        self.assertEqual(
            self._strip_layer_names(summary_file.histograms, model_type),
            {
                _ObservedSummary(logdir=train_dir, tag="histogram"),
            },
        )
        # Variable name prefixes carry random suffixes, so compare modulo
        # variable names.
        expected_image_summaries = {
            _ObservedSummary(logdir=train_dir, tag="bias/image"),
            _ObservedSummary(logdir=train_dir, tag="kernel/image"),
        }
        self.assertEqual(
            self._strip_variable_names(summary_file.images),
            expected_image_summaries,
        )
    @pytest.mark.requires_trainable_backend
    def test_TensorBoard_projector_callback(self):
        """`embeddings_freq` writes a projector config for the embedding
        layer.
        """
        model = models.Sequential(
            [
                layers.Input((10,)),
                layers.Embedding(10, 10, name="test_embedding"),
                layers.Dense(1, activation="sigmoid"),
            ]
        )
        model.compile(
            optimizer="adam", loss=losses.BinaryCrossentropy(from_logits=True)
        )
        x, y = np.ones((10, 10)), np.ones((10, 10))
        logdir, _, _ = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(
            logdir,
            embeddings_freq=1,
            embeddings_metadata={"test_embedding": "metadata.tsv"},
        )
        model.fit(
            x,
            y,
            batch_size=2,
            epochs=2,
            validation_data=(x, y),
            callbacks=[tb_cbk],
        )
        # The projector config file content is checked verbatim.
        with open(os.path.join(logdir, "projector_config.pbtxt")) as f:
            self.assertEqual(
                f.readlines(),
                [
                    "embeddings {\n",
                    "  tensor_name: "
                    '"layer_with_weights-0/embeddings/.ATTRIBUTES/'
                    'VARIABLE_VALUE"\n',
                    '  metadata_path: "metadata.tsv"\n',
                    "}\n",
                ],
            )
    @pytest.mark.requires_trainable_backend
    def test_custom_summary(self):
        """Summaries emitted inside a layer's `call` show up in the logs."""

        def scalar_v2_mock(name, data, step=None):
            """A reimplementation of the scalar plugin to avoid circular
            deps."""
            metadata = SummaryMetadata()
            # Should match value in tensorboard/plugins/scalar/metadata.py.
            metadata.plugin_data.plugin_name = "scalars"
            with summary.experimental.summary_scope(
                name, "scalar_summary", values=[data, step]
            ) as (tag, _):
                tensor = backend.convert_to_tensor(data, dtype="float32")
                if backend.backend() == "torch":
                    # TODO: Use device scope after the API is added.
                    if tensor.is_cuda:
                        tensor = tensor.cpu()
                summary.write(
                    tag=tag,
                    tensor=tensor,
                    step=step,
                    metadata=metadata,
                )

        class LayerWithSummary(layers.Layer):
            # Emits a scalar summary of the input sum on every call.
            def call(self, x):
                scalar_v2_mock("custom_summary", ops.sum(x))
                return x

        model = models.Sequential(
            [
                layers.Input((5,)),
                LayerWithSummary(),
            ]
        )
        # summary ops not compatible with XLA
        model.compile("sgd", "mse", jit_compile=False)
        logdir, train_dir, validation_dir = self._get_log_dirs()
        tb_cbk = callbacks.TensorBoard(logdir, update_freq=1)
        x, y = np.ones((10, 5)), np.ones((10, 5))
        model.fit(
            x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk]
        )
        summary_file = list_summaries(logdir)
        # TODO: tensorflow will tag with model/layer_with_summary/custom_summary
        # Jax will only use custom_summary tag
        self.assertEqual(
            self._strip_to_only_final_name(summary_file.scalars),
            {
                _ObservedSummary(logdir=train_dir, tag="batch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
                _ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
                _ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="evaluation_loss_vs_iterations",
                ),
                _ObservedSummary(
                    logdir=train_dir,
                    tag="custom_summary",
                ),
                _ObservedSummary(
                    logdir=validation_dir,
                    tag="custom_summary",
                ),
            },
        )
        # self.assertEqual(
        #     summary_file.scalars,
        #     {
        #         _ObservedSummary(logdir=train_dir, tag="batch_loss"),
        #         _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
        #         _ObservedSummary(logdir=validation_dir,
        #                          tag="epoch_loss"),
        #         _ObservedSummary(
        #             logdir=validation_dir,
        #             tag="evaluation_loss_vs_iterations",
        #         ),
        #         _ObservedSummary(
        #             logdir=train_dir,
        #             tag="model/layer_with_summary/custom_summary",
        #         ),
        #         _ObservedSummary(
        #             logdir=validation_dir,
        #             tag="model/layer_with_summary/custom_summary",
        #         ),
        #     },
        # )
def _strip_to_only_final_name(self, summaries):
"""Removes all leading names in a summary
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
A new `set` of `_ObservedSummary` values striped of all
name except for the terminal one.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
result.add(s)
else:
new_tag = s.tag.split("/")[-1]
result.add(s._replace(tag=new_tag))
return result
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
raise ValueError(f"tag has no layer name: {s.tag!r}")
start_from = 2 if "subclass" in model_type else 1
new_tag = "/".join(s.tag.split("/")[start_from:])
result.add(s._replace(tag=new_tag))
return result
def _strip_variable_names(self, summaries):
"""Remove `variable_n` from summary tag
`variable_n` tag names are added with random numbers. Removing them
ensures deterministic tag names.
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
result.add(s)
else:
split_tag = s.tag.split("/")
if "variable" in split_tag[0]:
result.add(s._replace(tag=split_tag[-1]))
else:
result.add(s)
return result
@pytest.mark.skipif(
    backend.backend() == "torch",
    reason="Torch backend requires blocking numpy conversion.",
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_non_blocking(self):
    """TensorBoard must not force a blocking `.numpy()` on logged tensors.

    Replaces `tensor.numpy` with a raising stub and drives every
    callback hook with that tensor in `logs`; any blocking conversion
    inside the callback surfaces as the injected RuntimeError.
    """
    logdir, _, _ = self._get_log_dirs()
    model = models.Sequential([layers.Dense(1)])
    model.optimizer = optimizers.Adam()
    tb = callbacks.TensorBoard(logdir)
    cb_list = callbacks.CallbackList(
        [tb], model=model, epochs=1, steps=100, verbose=0
    )
    tensor = ops.convert_to_tensor(1.0)

    def mock_numpy():
        raise RuntimeError(
            "If this error is seen, TensorBoard is causing a blocking "
            "NumPy conversion."
        )

    # Poison host conversion: any `.numpy()` call now raises.
    tensor.numpy = mock_numpy
    logs = {"metric": tensor}
    # Exercise the full train/test/predict hook surface.
    cb_list.on_train_begin(logs)
    cb_list.on_epoch_begin(0, logs)
    cb_list.on_train_batch_begin(0, logs)
    cb_list.on_train_batch_end(0, logs)
    cb_list.on_epoch_end(0, logs)
    cb_list.on_train_end(logs)
    cb_list.on_test_begin(logs)
    cb_list.on_test_batch_begin(0, logs)
    cb_list.on_test_batch_end(0, logs)
    cb_list.on_test_end(logs)
    cb_list.on_predict_begin(logs)
    cb_list.on_predict_batch_begin(logs)
    cb_list.on_predict_batch_end(logs)
    cb_list.on_predict_end(logs)
def _count_xplane_file(self, logdir):
profile_dir = os.path.join(logdir, "plugins", "profile")
count = 0
for dirpath, dirnames, filenames in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith(".xplane.pb"):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
    """Fit `model` with a graph-writing TensorBoard callback and verify
    the serialized Keras model was written to the train log dir.

    For non-eager models, additionally checks that exactly one train
    graph was written and that every non-input layer name appears in it.

    Args:
        model: A compiled Keras model accepting inputs of shape
            (10, 10, 1).  # assumes caller compiled the model — TODO confirm
    """
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    logdir, train_dir, validation_dir = self._get_log_dirs()
    tb_cbk = callbacks.TensorBoard(
        logdir, write_graph=True, profile_batch=0
    )
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=3,
        validation_data=(x, y),
        callbacks=[tb_cbk],
    )
    summary_file = list_summaries(logdir)
    # The serialized model is logged once under the "keras" tag.
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=train_dir, tag="keras"),
        },
    )
    if not model.run_eagerly:
        # There should be one train graph
        self.assertLen(summary_file.graph_defs, 1)
        for graph_def in summary_file.graph_defs:
            graph_def_str = str(graph_def)
            # All the model layers should appear in the graphs
            for layer in model.layers:
                if "input" not in layer.name:
                    self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_write_sequential_model_no_input_shape(self):
    """Disabled: requires `to_json` support in the trainer (see TODO)."""
    # TODO: Requires to_json implementation in trainer
    # model = models.Sequential(
    #     [
    #         Conv2D(8, (3, 3)),
    #         Flatten(),
    #         Dense(1),
    #     ]
    # )
    # model.compile("sgd", "mse")
    # self.fitModelAndAssertKerasModelWritten(model)
    pass
def test_TensorBoard_write_sequential_model_with_input_shape(self):
    """Disabled: requires `to_json` support in the trainer (see TODO)."""
    # TODO: Requires to_json implementation in trainer
    # model = models.Sequential(
    #     [
    #         Input(input_shape=(10, 10, 1)),
    #         Conv2D(8, (3, 3)),
    #         Flatten(),
    #         Dense(1),
    #     ]
    # )
    # model.compile("sgd", "mse")
    # self.fitModelAndAssertKerasModelWritten(model)
    pass
def test_TensorBoard_write_model(self):
    """Disabled: requires `to_json` support in the trainer (see TODO)."""
    # TODO: Requires to_json implementation in trainer
    # See https://github.com/keras-team/keras/blob/ \
    # a8d4a7f1ffc9de3c5932828a107e4e95e8803fb4/ \
    # keras/engine/training.py#L3313
    # inputs = Input([10, 10, 1])
    # x = Conv2D(8, (3, 3), activation="relu")(inputs)
    # x = Flatten()(x)
    # x = Dense(1)(x)
    # model = models.Model(inputs=inputs, outputs=[x])
    # model.compile("sgd", "mse")
    # breakpoint()
    # self.fitModelAndAssertKerasModelWritten(model)
    pass
@pytest.mark.skipif(
    backend.backend() not in ("jax", "tensorflow"),
    reason="The profiling test can only run with TF and JAX backends.",
)
def test_TensorBoard_auto_trace(self):
    """Profiling (`profile_batch=1`) emits a "batch_1" trace summary and
    exactly one `.xplane.pb` profiler file in the train log dir.

    On JAX with Python < 3.12 the callback must warn, and the rest of
    the test is skipped (profiling there raises a segfault).
    """
    # `validation_dir` from `_get_log_dirs()` is unused here.
    logdir, train_dir, _ = self._get_log_dirs()
    model = models.Sequential(
        [
            layers.Input((10, 10, 1)),
            layers.Flatten(),
            layers.Dense(1),
        ]
    )
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    if backend.backend() == "jax" and sys.version_info[1] < 12:
        with pytest.warns(match="backend requires python >= 3.12"):
            callbacks.TensorBoard(
                logdir, histogram_freq=1, profile_batch=1, write_graph=False
            )
        self.skipTest(
            "Profiling with JAX and python < 3.12 "
            "raises segmentation fault."
        )
    tb_cbk = callbacks.TensorBoard(
        logdir, histogram_freq=1, profile_batch=1, write_graph=False
    )
    model.compile("sgd", "mse")
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk],
    )
    summary_file = list_summaries(logdir)
    # The traced batch is recorded as a "batch_1" tensor summary.
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=train_dir, tag="batch_1"),
        },
    )
    # Exactly one profiler xplane dump should have been produced.
    self.assertEqual(1, self._count_xplane_file(logdir=train_dir))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/callback.py | keras/src/callbacks/callback.py | from keras.src import backend
from keras.src import utils
from keras.src.api_export import keras_export
@keras_export("keras.callbacks.Callback")
class Callback:
    """Base class used to build new callbacks.

    Callbacks can be passed to keras methods such as `fit()`, `evaluate()`, and
    `predict()` in order to hook into the various stages of the model training,
    evaluation, and inference lifecycle.

    To create a custom callback, subclass `keras.callbacks.Callback` and
    override the method associated with the stage of interest.

    Example:

    >>> training_finished = False
    >>> class MyCallback(Callback):
    ...     def on_train_end(self, logs=None):
    ...         global training_finished
    ...         training_finished = True
    >>> model = Sequential([
    ...     layers.Dense(1, input_shape=(1,))])
    >>> model.compile(loss='mean_squared_error')
    >>> model.fit(np.array([[1.0]]), np.array([[1.0]]),
    ...           callbacks=[MyCallback()])
    >>> assert training_finished == True

    If you want to use `Callback` objects in a custom training loop:

    1. You should pack all your callbacks into a single `callbacks.CallbackList`
       so they can all be called together.
    2. You will need to manually call all the `on_*` methods at the appropriate
       locations in your loop. Like this:

    Example:

    ```python
    callbacks = keras.callbacks.CallbackList([...])
    callbacks.append(...)
    callbacks.on_train_begin(...)
    for epoch in range(EPOCHS):
        callbacks.on_epoch_begin(epoch)
        for i, data in dataset.enumerate():
            callbacks.on_train_batch_begin(i)
            batch_logs = model.train_step(data)
            callbacks.on_train_batch_end(i, batch_logs)
        epoch_logs = ...
        callbacks.on_epoch_end(epoch, epoch_logs)
    final_logs=...
    callbacks.on_train_end(final_logs)
    ```

    Attributes:
        params: Dict. Training parameters
            (eg. verbosity, batch size, number of epochs...).
        model: Instance of `Model`.
            Reference of the model being trained.

    The `logs` dictionary that callback methods
    take as argument will contain keys for quantities relevant to
    the current batch or epoch (see method-specific docstrings).
    """

    def __init__(self):
        # Both are populated later by the training loop via
        # `set_params` / `set_model`; a bare `Callback` has neither.
        self.params = None
        self._model = None

    def set_params(self, params):
        """Store the training parameters dict (verbosity, epochs, ...)."""
        self.params = params

    def set_model(self, model):
        """Attach the model this callback operates on."""
        self._model = model

    @property
    def model(self):
        """The model being trained, unwrapped/synced for the active backend."""
        if backend.backend() == "torch":
            from torch.nn.parallel import DistributedDataParallel

            if isinstance(self._model, DistributedDataParallel):
                # Keras Callbacks expect to work with Keras models. e.g
                # ModelCheckpoint and EarlyStopping both attempt to call
                # keras-specific APIs on the value returned from this
                # property. If this callback was created against a DDP
                # wrapper instead of the underlying keras.Model, it is
                # likely to fail. Return self._model.module for DDP
                # instances instead.
                return self._model.module
        if backend.backend() == "jax" and hasattr(
            self._model, "jax_state_sync"
        ):
            # With JAX, by default the model state is not
            # attached to the model in the middle of an
            # epoch. We have to force a sync before
            # accessing model state for e.g. checkpointing.
            self._model.jax_state_sync()
        return self._model

    @utils.default
    def on_batch_begin(self, batch, logs=None):
        """A backwards compatibility alias for `on_train_batch_begin`."""

    @utils.default
    def on_batch_end(self, batch, logs=None):
        """A backwards compatibility alias for `on_train_batch_end`."""

    @utils.default
    def on_epoch_begin(self, epoch, logs=None):
        """Called at the start of an epoch.

        Subclasses should override for any actions to run. This function should
        only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_epoch_end(self, epoch, logs=None):
        """Called at the end of an epoch.

        Subclasses should override for any actions to run. This function should
        only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict, metric results for this training epoch, and for the
                validation epoch if validation is performed. Validation result
                keys are prefixed with `val_`. For training epoch, the values of
                the `Model`'s metrics are returned. Example:
                `{'loss': 0.2, 'accuracy': 0.7}`.
        """

    @utils.default
    def on_train_batch_begin(self, batch, logs=None):
        """Called at the beginning of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """
        # For backwards compatibility.
        self.on_batch_begin(batch, logs=logs)

    @utils.default
    def on_train_batch_end(self, batch, logs=None):
        """Called at the end of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """
        # For backwards compatibility.
        self.on_batch_end(batch, logs=logs)

    @utils.default
    def on_test_batch_begin(self, batch, logs=None):
        """Called at the beginning of a batch in `evaluate` methods.

        Also called at the beginning of a validation batch in the `fit`
        methods, if validation data is provided.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_test_batch_end(self, batch, logs=None):
        """Called at the end of a batch in `evaluate` methods.

        Also called at the end of a validation batch in the `fit`
        methods, if validation data is provided.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """

    @utils.default
    def on_predict_batch_begin(self, batch, logs=None):
        """Called at the beginning of a batch in `predict` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_predict_batch_end(self, batch, logs=None):
        """Called at the end of a batch in `predict` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """

    @utils.default
    def on_train_begin(self, logs=None):
        """Called at the beginning of training.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_train_end(self, logs=None):
        """Called at the end of training.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
                `on_epoch_end()` is passed to this argument for this method but
                that may change in the future.
        """

    @utils.default
    def on_test_begin(self, logs=None):
        """Called at the beginning of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_test_end(self, logs=None):
        """Called at the end of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
                `on_test_batch_end()` is passed to this argument for this method
                but that may change in the future.
        """

    @utils.default
    def on_predict_begin(self, logs=None):
        """Called at the beginning of prediction.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """

    @utils.default
    def on_predict_end(self, logs=None):
        """Called at the end of prediction.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/monitor_callback.py | keras/src/callbacks/monitor_callback.py | import warnings
from keras.src import ops
from keras.src.callbacks.callback import Callback
from keras.src.trainers import compile_utils
class MonitorCallback(Callback):
    """Base class for callbacks that monitor a quantity and evaluates
    improvements.

    This class provides common functionality for callbacks that monitor a
    metric during training to determine whether a condition has been met,
    such as improvement over time. It encapsulates logic for selecting
    the comparison operation based on a `monitor` value and `mode`, and
    computing whether a new value is an improvement.

    It is intended to be subclassed by other callbacks like `ModelCheckpoint`,
    `EarlyStopping`, or `ReduceLROnPlateau`, and is not meant to be used
    directly.

    Arguments:
        monitor: Quantity to be monitored. Defaults to `"val_loss"`.
        mode: One of `{"auto", "min", "max"}`. In `min` mode, training will aim
            to minimize the monitored quantity; in `'max'` mode it will aim to
            maximize it.; in `"auto"` mode, the direction is automatically
            inferred from the name of the monitored quantity. Defaults to
            `"auto"`.
        baseline: Floating point initial "best" value of the metric to be
            monitored. If `None` (default), the first monitored value will be
            used.
        min_delta: Minimum change in the monitored quantity to qualify as an
            improvement, i.e. an absolute change of less than min_delta, will
            count as no improvement. Defaults to `0`.

    Raises:
        ValueError: If `mode='auto'` is selected and the direction of the metric
            cannot be inferred.
    """

    def __init__(
        self,
        monitor="val_loss",
        mode="auto",
        baseline=None,
        min_delta=0,
    ):
        super().__init__()
        if mode not in ["auto", "min", "max"]:
            warnings.warn(
                f"{self.__class__.__name__} mode '{mode}' is unknown, fallback "
                "to auto mode.",
                stacklevel=2,
            )
            mode = "auto"
        self.monitor = monitor
        self.mode = mode
        # Initial reference value; `None` means "first observed value wins".
        self.best = baseline
        self.min_delta = abs(min_delta)
        # Comparison op (ops.less / ops.greater); resolved lazily by
        # `_set_monitor_op` once the model (and its metrics) exist.
        self.monitor_op = None

    def _set_monitor_op(self):
        """Resolve `self.monitor_op` from `mode`, inferring it in "auto" mode.

        In "auto" mode the direction is inferred from the metric name
        ("loss" minimizes) or from the matching compiled metric's
        `_direction` attribute ("up" maximizes).

        Raises:
            ValueError: if the direction cannot be inferred.
        """
        if self.mode == "min":
            self.monitor_op = ops.less
        elif self.mode == "max":
            self.monitor_op = ops.greater
        else:
            # "auto" mode: strip a leading "val_" so "val_loss" and
            # "loss" resolve the same way.
            metric_name = self.monitor.removeprefix("val_")
            if metric_name == "loss":
                self.monitor_op = ops.less
            if hasattr(self.model, "metrics"):
                all_metrics = []
                for m in self.model.metrics:
                    if isinstance(
                        m,
                        (
                            compile_utils.CompileMetrics,
                            compile_utils.MetricsList,
                        ),
                    ):
                        all_metrics.extend(m.metrics)
                for m in all_metrics:
                    if m.name == metric_name:
                        if hasattr(m, "_direction"):
                            if m._direction == "up":
                                self.monitor_op = ops.greater
                            else:
                                self.monitor_op = ops.less
        if self.monitor_op is None:
            raise ValueError(
                f"{self.__class__.__name__} callback received "
                f"monitor={self.monitor}, but Keras isn't able to "
                "automatically determine whether that metric should be "
                "maximized or minimized. Pass `mode='max'` in order to "
                "monitor based on the highest metric value, or pass "
                "`mode='min'` in order to use the lowest value."
            )
        if self.monitor_op == ops.less:
            # Fold the direction into `min_delta` so `_is_improvement`
            # can use a single subtraction for both modes.
            # NOTE(review): a second call would flip the sign back;
            # callers are expected to invoke this only once.
            self.min_delta *= -1

    def _is_improvement(self, monitor_value, reference_value):
        # With no reference yet, any observed value counts as improvement.
        if reference_value is None:
            return True
        return self.monitor_op(monitor_value - self.min_delta, reference_value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/early_stopping_test.py | keras/src/callbacks/early_stopping_test.py | import numpy as np
import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import testing
class EarlyStoppingTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_early_stopping(self):
        """mode/monitor combinations resolve to the expected comparison op,
        and an un-inferable monitor raises a ValueError."""
        x_train = np.random.random((10, 5))
        y_train = np.random.random((10, 1))
        x_test = np.random.random((10, 5))
        y_test = np.random.random((10, 1))
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(
            loss="mae",
            optimizer="adam",
            metrics=[
                "mse",
                "acc",
                "accuracy",
                "hinge",
                metrics.F1Score(name="f1_score"),
            ],
        )
        # (mode argument, monitored quantity, expected resolved direction)
        cases = [
            ("max", "val_mse", "max"),
            ("min", "val_loss", "min"),
            ("auto", "val_mse", "min"),
            ("auto", "loss", "min"),
            ("auto", "acc", "max"),
            ("auto", "val_accuracy", "max"),
            ("auto", "hinge", "min"),
            ("auto", "f1_score", "max"),
        ]
        for mode, monitor, expected_mode in cases:
            patience = 0
            cbks = [
                callbacks.EarlyStopping(
                    patience=patience, monitor=monitor, mode=mode
                )
            ]
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )
            if expected_mode == "max":
                monitor_op = ops.greater
            else:
                monitor_op = ops.less
            self.assertEqual(cbks[0].monitor_op, monitor_op)
        # "unknown" cannot be resolved in auto mode.
        with self.assertRaises(ValueError):
            cbks = [
                callbacks.EarlyStopping(patience=patience, monitor="unknown")
            ]
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )

    @pytest.mark.requires_trainable_backend
    def test_early_stopping_patience(self):
        """Training stops once `patience` epochs pass without improvement."""
        cases = [0, 1, 2, 3]
        # Best loss is at epoch 2 (8.0); later values never beat it.
        losses = [10.0, 9.0, 8.0, 9.0, 8.9, 8.8, 8.7, 8.6, 8.5]
        for patience in cases:
            stopper = callbacks.EarlyStopping(monitor="loss", patience=patience)
            stopper.set_model(models.Sequential())
            stopper.model.compile(loss="mse", optimizer="sgd")
            stopper.on_train_begin()
            for epoch, loss in enumerate(losses):
                stopper.on_epoch_end(epoch=epoch, logs={"loss": loss})
                if stopper.model.stop_training:
                    break
            self.assertEqual(stopper.stopped_epoch, max(patience, 1) + 2)

    @pytest.mark.requires_trainable_backend
    def test_early_stopping_reuse(self):
        """The same EarlyStopping instance can be reused across `fit` calls;
        each fit still trains at least `patience` epochs."""
        patience = 3
        data = np.random.random((100, 1))
        labels = np.where(data > 0.5, 1, 0)
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(
            optimizer="sgd",
            loss="mae",
            metrics=["mse"],
        )
        stopper = callbacks.EarlyStopping(monitor="mse", patience=patience)
        history1 = model.fit(
            data, labels, callbacks=[stopper], verbose=0, epochs=20
        )
        self.assertGreaterEqual(len(history1.epoch), patience)
        history2 = model.fit(
            data, labels, callbacks=[stopper], verbose=0, epochs=20
        )
        self.assertGreaterEqual(len(history2.epoch), patience)

    @pytest.mark.requires_trainable_backend
    def test_early_stopping_with_baseline(self):
        """With a `baseline`, training still runs at least `patience` epochs."""
        baseline = 0.6
        x_train = np.random.random((10, 5))
        y_train = np.random.random((10, 1))
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(optimizer="sgd", loss="mae", metrics=["mse"])
        patience = 3
        stopper = callbacks.EarlyStopping(
            monitor="mse", patience=patience, baseline=baseline
        )
        hist = model.fit(
            x_train, y_train, callbacks=[stopper], verbose=0, epochs=20
        )
        assert len(hist.epoch) >= patience

    def test_early_stopping_final_weights_when_restoring_model_weights(self):
        """`restore_best_weights=True` restores the best epoch's weights,
        also when no epoch beats the baseline or another callback stops."""

        class DummyModel:
            # Minimal model stand-in: "weights" is just the epoch index.
            def __init__(self):
                self.stop_training = False
                self.weights = -1

            def get_weights(self):
                return self.weights

            def set_weights(self, weights):
                self.weights = weights

            def set_weight_to_epoch(self, epoch):
                self.weights = epoch

        early_stop = callbacks.EarlyStopping(
            monitor="val_loss", patience=2, restore_best_weights=True
        )
        early_stop.set_model(DummyModel())
        losses = [0.2, 0.15, 0.1, 0.11, 0.12]
        # The best configuration is in the epoch 2 (loss = 0.1000).
        epochs_trained = 0
        early_stop.on_train_begin()
        for epoch in range(len(losses)):
            epochs_trained += 1
            early_stop.model.set_weight_to_epoch(epoch=epoch)
            early_stop.on_epoch_end(epoch, logs={"val_loss": losses[epoch]})
            if early_stop.model.stop_training:
                break
        early_stop.on_train_end()
        # The best configuration is in epoch 2 (loss = 0.1000),
        # and while patience = 2, we're restoring the best weights,
        # so we end up at the epoch with the best weights, i.e. epoch 2
        self.assertEqual(early_stop.model.get_weights(), 2)

        # Check early stopping when no model beats the baseline.
        early_stop = callbacks.EarlyStopping(
            monitor="val_loss",
            patience=5,
            baseline=0.5,
            restore_best_weights=True,
        )
        early_stop.set_model(DummyModel())
        losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]
        # The best configuration is in the epoch 2 (loss = 0.7000).
        epochs_trained = 0
        early_stop.on_train_begin()
        for epoch in range(len(losses)):
            epochs_trained += 1
            early_stop.model.set_weight_to_epoch(epoch=epoch)
            early_stop.on_epoch_end(epoch, logs={"val_loss": losses[epoch]})
            if early_stop.model.stop_training:
                break
        early_stop.on_train_end()
        # No epoch improves on the baseline, so we should train for only 5
        # epochs, and restore the second model.
        self.assertEqual(epochs_trained, 5)
        self.assertEqual(early_stop.model.get_weights(), 2)

        # Check weight restoration when another callback requests a stop.
        early_stop = callbacks.EarlyStopping(
            monitor="val_loss",
            patience=5,
            baseline=0.5,
            restore_best_weights=True,
        )
        early_stop.set_model(DummyModel())
        losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]
        # The best configuration is in the epoch 2 (loss = 0.7000).
        epochs_trained = 0
        early_stop.on_train_begin()
        for epoch in range(len(losses)):
            epochs_trained += 1
            early_stop.model.set_weight_to_epoch(epoch=epoch)
            early_stop.on_epoch_end(epoch, logs={"val_loss": losses[epoch]})
            if epoch == 3:
                early_stop.model.stop_training = True
            if early_stop.model.stop_training:
                break
        early_stop.on_train_end()
        # We should restore the second model.
        self.assertEqual(epochs_trained, 4)
        self.assertEqual(early_stop.model.get_weights(), 2)

    @pytest.mark.requires_trainable_backend
    def test_early_stopping_with_start_from_epoch(self):
        """`start_from_epoch` delays the patience countdown."""
        x_train = np.random.random((10, 5))
        y_train = np.random.random((10, 1))
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(optimizer="sgd", loss="mae", metrics=["mse"])
        start_from_epoch = 2
        patience = 3
        stopper = callbacks.EarlyStopping(
            monitor="mse",
            patience=patience,
            start_from_epoch=start_from_epoch,
        )
        history = model.fit(
            x_train, y_train, callbacks=[stopper], verbose=0, epochs=20
        )
        # Test 'patience' argument functions correctly when used
        # in conjunction with 'start_from_epoch'.
        self.assertGreaterEqual(len(history.epoch), patience + start_from_epoch)
        start_from_epoch = 2
        patience = 0
        stopper = callbacks.EarlyStopping(
            monitor="mse",
            patience=patience,
            start_from_epoch=start_from_epoch,
        )
        history = model.fit(
            x_train, y_train, callbacks=[stopper], verbose=0, epochs=20
        )
        # Test for boundary condition when 'patience' = 0.
        self.assertGreaterEqual(len(history.epoch), start_from_epoch)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/monitor_callback_test.py | keras/src/callbacks/monitor_callback_test.py | import numpy as np
import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import testing
class MonitorCallbackTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_monitor_op_logic(self):
        """`_set_monitor_op` resolves the expected comparison op for each
        mode/monitor combination and raises on an un-inferable monitor."""
        x_train = np.random.random((10, 5))
        y_train = np.random.random((10, 1))
        x_test = np.random.random((10, 5))
        y_test = np.random.random((10, 1))
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(
            loss="mae",
            optimizer="adam",
            metrics=[
                "mse",
                "acc",
                "accuracy",
                "hinge",
                metrics.F1Score(name="f1_score"),
            ],
        )
        # (mode argument, monitored quantity, expected resolved direction)
        cases = [
            ("max", "val_mse", "max"),
            ("min", "val_loss", "min"),
            ("auto", "val_mse", "min"),
            ("auto", "loss", "min"),
            ("auto", "acc", "max"),
            ("auto", "val_accuracy", "max"),
            ("auto", "hinge", "min"),
            ("auto", "f1_score", "max"),
        ]
        for mode, monitor, expected_mode in cases:
            monitor_callback = callbacks.MonitorCallback(monitor, mode)
            monitor_callback.set_model(model)
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                epochs=2,
                verbose=0,
            )
            monitor_callback._set_monitor_op()
            if expected_mode == "max":
                monitor_op = ops.greater
            else:
                monitor_op = ops.less
            self.assertEqual(monitor_callback.monitor_op, monitor_op)
        # "unknown" cannot be resolved in auto mode.
        with self.assertRaises(ValueError):
            monitor = "unknown"
            monitor_callback = callbacks.MonitorCallback(monitor)
            monitor_callback.set_model(model)
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                epochs=2,
                verbose=0,
            )
            monitor_callback._set_monitor_op()

    @pytest.mark.requires_trainable_backend
    def test_min_delta(self):
        """`_is_improvement` applies `min_delta` as the improvement margin
        and treats a `None` reference as always improved."""
        monitor_callback = callbacks.MonitorCallback(mode="max", min_delta=0.5)
        monitor_callback._set_monitor_op()
        self.assertTrue(monitor_callback._is_improvement(0.75, 0))
        self.assertTrue(monitor_callback._is_improvement(0.5, None))
        self.assertFalse(monitor_callback._is_improvement(0.5, 0))
        self.assertFalse(monitor_callback._is_improvement(0.2, 0.5))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/tensorboard.py | keras/src/callbacks/tensorboard.py | import logging
import os
import sys
import time
import warnings
from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.layers import Embedding
from keras.src.optimizers import Optimizer
from keras.src.utils import file_utils
@keras_export("keras.callbacks.TensorBoard")
class TensorBoard(Callback):
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow. A TensorFlow
installation is required to use this callback.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Weight histograms
* Sampled profiling
When used in `model.evaluate()` or regular validation
in addition to epoch summaries, there will be a summary that records
evaluation metrics vs `model.optimizer.iterations` written. The metric names
will be prepended with `evaluation`, with `model.optimizer.iterations` being
the step in the visualized TensorBoard.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard. e.g.,
`log_dir = os.path.join(working_dir, 'logs')`.
This directory should not be reused by any other callbacks.
histogram_freq: frequency (in epochs) at which to compute
weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: (Not supported at this time)
Whether to visualize the graph in TensorBoard.
Note that the log file can become quite large
when `write_graph` is set to `True`.
write_images: whether to write model weights to visualize as image in
TensorBoard.
write_steps_per_second: whether to log the training steps per second
into TensorBoard. This supports both epoch and batch frequency
logging.
update_freq: `"batch"` or `"epoch"` or integer. When using `"epoch"`,
writes the losses and metrics to TensorBoard after every epoch.
If using an integer, let's say `1000`, all metrics and losses
(including custom ones added by `Model.compile`) will be logged to
TensorBoard every 1000 batches. `"batch"` is a synonym for 1,
meaning that they will be written every batch.
Note however that writing too frequently to TensorBoard can slow
down your training, especially when used with distribution
strategies as it will incur additional synchronization overhead.
Batch-level summary writing is also available via `train_step`
override. Please see
[TensorBoard Scalars tutorial](
https://www.tensorflow.org/tensorboard/scalars_and_keras#batch-level_logging)
for more details.
profile_batch: Profile the batch(es) to sample compute characteristics.
profile_batch must be a non-negative integer or a tuple of integers.
A pair of positive integers signify a range of batches to profile.
By default, profiling is disabled.
embeddings_freq: frequency (in epochs) at which embedding layers will be
visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: Dictionary which maps embedding layer names to the
filename of a file in which to save metadata for the embedding layer.
In case the same metadata file is to be
used for all embedding layers, a single filename can be passed.
Examples:
```python
tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./logs")
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Then run the tensorboard command to view the visualizations.
```
Custom batch-level summaries in a subclassed Model:
```python
class MyModel(keras.Model):
def build(self, _):
self.dense = keras.layers.Dense(10)
def call(self, x):
outputs = self.dense(x)
tf.summary.histogram('outputs', outputs)
return outputs
model = MyModel()
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N
# batches. In addition to any `tf.summary` contained in `model.call()`,
# metrics added in `Model.compile` will be logged every N batches.
tb_callback = keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Custom batch-level summaries in a Functional API Model:
```python
def my_summary(x):
tf.summary.histogram('x', x)
return x
inputs = keras.Input(10)
x = keras.layers.Dense(10)(inputs)
outputs = keras.layers.Lambda(my_summary)(x)
model = keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N
# batches. In addition to any `tf.summary` contained in `Model.call`,
# metrics added in `Model.compile` will be logged every N batches.
tb_callback = keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Profiling:
```python
# Profile a single batch, e.g. the 5th batch.
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=5)
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Profile a range of batches, e.g. from 10 to 20.
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=(10,20))
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
```
""" # noqa: E501
def __init__(
    self,
    log_dir="logs",
    histogram_freq=0,
    write_graph=True,
    write_images=False,
    write_steps_per_second=False,
    update_freq="epoch",
    profile_batch=0,
    embeddings_freq=0,
    embeddings_metadata=None,
):
    """Initializes callback state and validates profiling availability.

    See the class docstring for the meaning of each argument.
    """
    super().__init__()
    self.log_dir = str(log_dir)
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    self.write_steps_per_second = write_steps_per_second
    # "batch" is a legacy alias for logging every batch (update_freq=1).
    self.update_freq = 1 if update_freq == "batch" else update_freq
    self.embeddings_freq = embeddings_freq
    self.embeddings_metadata = embeddings_metadata
    if profile_batch:
        if backend.backend() not in ("jax", "tensorflow"):
            # TODO: profiling not available in torch, numpy
            raise ValueError(
                "Profiling is not yet available with the "
                f"{backend.backend()} backend. Please open a PR "
                "if you'd like to add this feature. Received: "
                f"profile_batch={profile_batch} (must be 0)"
            )
        elif backend.backend() == "jax":
            # JAX profiling is only supported on Python >= 3.12 per the
            # warning below; silently disable it otherwise.
            # NOTE(review): checks only the minor version component.
            if sys.version_info[1] < 12:
                warnings.warn(
                    "Profiling with the "
                    f"{backend.backend()} backend requires python >= 3.12."
                )
                profile_batch = 0
    # Sets _start_batch/_stop_batch/_should_trace and profiler state.
    self._init_profile_batch(profile_batch)
    # Counters/timers for batch-level logging and steps-per-second.
    self._global_train_batch = 0
    self._global_test_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._batch_start_time = 0
    self._summary_module = None
    # Lazily initialized in order to avoid creating event files when
    # not needed.
    self._writers = {}
    # Used to restore any existing `SummaryWriter` after training ends.
    self._prev_summary_state = []
def set_model(self, model):
    """Stores the model, resets writers, and schedules graph/embedding
    setup according to the callback's configuration."""
    self._model = model
    base_dir = self.log_dir
    self._log_write_dir = base_dir
    self._train_dir = os.path.join(base_dir, "train")
    self._val_dir = os.path.join(base_dir, "validation")

    # Drop writers belonging to any previously attached model.
    self._writers = {}

    self._should_write_train_graph = False
    if self.write_graph:
        self._write_keras_model_summary()
        self._should_write_train_graph = True
    if self.embeddings_freq:
        self._configure_embeddings()
@property
def summary(self):
    """Lazily imports and caches the `tensorflow.summary` module."""
    if self._summary_module is None:
        import tensorflow.summary as tf_summary

        self._summary_module = tf_summary
    return self._summary_module
@property
def _train_writer(self):
    """Summary file writer for the train log dir, created on first use."""
    writers = self._writers
    if "train" not in writers:
        writers["train"] = self.summary.create_file_writer(self._train_dir)
    return writers["train"]
@property
def _val_writer(self):
    """Summary file writer for the validation log dir, created on first
    use."""
    writers = self._writers
    if "val" not in writers:
        writers["val"] = self.summary.create_file_writer(self._val_dir)
    return writers["val"]
def _write_keras_model_train_graph(self):
    """Writes Keras model train_function graph to TensorBoard."""
    with self._train_writer.as_default():
        train_fn = self.model.train_function
        # If the train_function is a `tf.function`, we can write out a
        # graph
        if hasattr(train_fn, "function_spec"):
            # TODO(b/243822285): Use _variable_creation_fn directly.
            # NOTE(review): relies on private `tf.function` attributes
            # (`_concrete_stateful_fn` / `_concrete_variable_creation_fn`);
            # these may change across TensorFlow versions.
            if hasattr(train_fn, "_concrete_stateful_fn"):
                self.summary.graph(train_fn._concrete_stateful_fn.graph)
            else:
                self.summary.graph(
                    train_fn._concrete_variable_creation_fn.graph
                )
def _write_keras_model_summary(self):
    """Writes the Keras graph-network summary (conceptual graph) to
    TensorBoard for Functional/Sequential models."""
    with self._train_writer.as_default():
        model_kind = self.model.__class__.__name__
        if model_kind in ("Functional", "Sequential"):
            keras_model_summary("keras", self.model, step=0)
def _configure_embeddings(self):
    """Configure the Projector for embeddings.

    Writes a `projector_config.pbtxt` into the log dir mapping each
    `Embedding` layer to an optional metadata file.
    """
    from google.protobuf import text_format
    from tensorboard.plugins import projector

    config = projector.ProjectorConfig()
    for layer in self.model.layers:
        if isinstance(layer, Embedding):
            embedding = config.embeddings.add()
            # Embeddings are always the first layer, so this naming should
            # be consistent in any keras models checkpoints.
            name = (
                "layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
            )
            embedding.tensor_name = name

            if self.embeddings_metadata is not None:
                if isinstance(self.embeddings_metadata, str):
                    # Single filename shared by all embedding layers.
                    embedding.metadata_path = self.embeddings_metadata
                else:
                    # Pop matched entries so that any leftover keys
                    # signal unrecognized layer names below.
                    if layer.name in self.embeddings_metadata.keys():
                        embedding.metadata_path = (
                            self.embeddings_metadata.pop(layer.name)
                        )

    # Any keys remaining in the dict did not match an Embedding layer.
    if self.embeddings_metadata and not isinstance(
        self.embeddings_metadata, str
    ):
        raise ValueError(
            "Unrecognized `Embedding` layer names passed to "
            "`keras.callbacks.TensorBoard` `embeddings_metadata` "
            f"argument: {self.embeddings_metadata.keys()}"
        )

    config_pbtxt = text_format.MessageToString(config)
    path = os.path.join(self._log_write_dir, "projector_config.pbtxt")
    with file_utils.File(path, "w") as f:
        f.write(config_pbtxt)
def _push_writer(self, writer, step):
    """Sets the default writer for custom batch-level summaries.

    No-op when `update_freq == "epoch"` (batch logging disabled).
    """
    if self.update_freq == "epoch":
        return

    def should_record():
        # Only record summaries on batches aligned with `update_freq`.
        return step % self.update_freq == 0

    summary_context = (
        writer.as_default(step),
        self.summary.record_if(should_record),
    )
    self._prev_summary_state.append(summary_context)
    # Enter both contexts manually; `_pop_writer` exits them in reverse
    # order.
    summary_context[0].__enter__()
    summary_context[1].__enter__()
def _pop_writer(self):
    """Pops the current writer."""
    if self.update_freq == "epoch":
        return

    # See _push_writer for the content of the previous_context, which is
    # pair of context.
    previous_context = self._prev_summary_state.pop()
    # Exit in reverse order of entry (record_if first, then writer).
    previous_context[1].__exit__(*sys.exc_info())
    previous_context[0].__exit__(*sys.exc_info())
def _close_writers(self):
for writer in self._writers.values():
writer.close()
def _init_profile_batch(self, profile_batch):
"""Validate profile_batch value and set the range of batches to profile.
Sets values of _start_batch and _stop_batch attributes,
specifying the start and stop batch to profile.
Setting `profile_batch=0` disables profiling.
Args:
profile_batch: The range of batches to profile. Should be a
non-negative integer or a comma separated string of pair of positive
integers. A pair of positive integers signify a range of batches to
profile.
Raises:
ValueError: If profile_batch is not an integer or a comma separated
pair of positive integers.
"""
profile_batch_error_message = (
"profile_batch must be a non-negative integer or "
"2-tuple of positive "
"integers. A pair of positive integers "
"signifies a range of batches "
f"to profile. Found: {profile_batch}"
)
# Support legacy way of specifying "start,stop" or "start" as str.
if isinstance(profile_batch, str):
profile_batch = str(profile_batch).split(",")
profile_batch = tree.map_structure(int, profile_batch)
if isinstance(profile_batch, int):
self._start_batch = profile_batch
self._stop_batch = profile_batch
elif (
isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2
):
self._start_batch, self._stop_batch = profile_batch
else:
raise ValueError(profile_batch_error_message)
if self._start_batch < 0 or self._stop_batch < self._start_batch:
raise ValueError(profile_batch_error_message)
# True when the profiler was successfully started by this callback.
# We track the status here to make sure callbacks do not interfere with
# each other. The callback will only stop the profiler it started.
self._profiler_started = False
self._batch_trace_context = None
if self._start_batch > 0:
# Warm up and improve the profiling accuracy.
self._start_profiler(logdir="")
self._stop_profiler(save=False)
# True when a trace is running.
self._is_tracing = False
# Setting `profile_batch=0` disables profiling.
self._should_trace = not (
self._start_batch == 0 and self._stop_batch == 0
)
def on_train_begin(self, logs=None):
    """Resets batch counters and installs the train summary writer."""
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._push_writer(self._train_writer, self._global_train_batch)
def on_train_end(self, logs=None):
    """Restores the previous writer, stops any running trace, and closes
    all writers."""
    self._pop_writer()

    if self._is_tracing:
        self._stop_trace()

    self._close_writers()
def on_test_begin(self, logs=None):
    """Installs the validation writer for batch-level summaries."""
    self._push_writer(self._val_writer, self._global_test_batch)
def on_test_end(self, logs=None):
    """Logs evaluation metrics against optimizer iterations, then
    restores the previously installed summary writer.

    Args:
        logs: Optional dict of evaluation results. May be `None`; as
            noted in `on_train_batch_end`, `logs` isn't necessarily
            always a dict.
    """
    if self.model.optimizer and hasattr(self.model.optimizer, "iterations"):
        with self._val_writer.as_default():
            # Guard against `logs` being None, which previously raised
            # AttributeError on `.items()`.
            for name, value in (logs or {}).items():
                self.summary.scalar(
                    f"evaluation_{name}_vs_iterations",
                    value,
                    step=self.model.optimizer.iterations,
                )
    self._pop_writer()
def on_train_batch_begin(self, batch, logs=None):
    """Advances the global batch counter and starts tracing if this
    batch is the configured profiling start batch."""
    # Global (cross-epoch) batch counter used for step numbering and for
    # deciding when to start/stop the profiler trace.
    self._global_train_batch += 1
    if self.write_steps_per_second:
        self._batch_start_time = time.time()
    if not self._should_trace:
        return

    if self._global_train_batch == self._start_batch:
        self._start_trace()
    if self._profiler_started:
        self._batch_trace_context = backend.tensorboard.start_batch_trace(
            batch
        )
def on_train_batch_end(self, batch, logs=None):
    """Writes batch-level summaries and stops tracing at the configured
    stop batch."""
    if self._should_write_train_graph:
        # Write the train graph exactly once, after the first batch.
        self._write_keras_model_train_graph()
        self._should_write_train_graph = False
    if self.write_steps_per_second:
        batch_run_time = time.time() - self._batch_start_time
        # NOTE(review): assumes batch_run_time > 0; an extremely fast
        # batch could divide by zero here — confirm acceptable.
        self.summary.scalar(
            "batch_steps_per_second",
            1.0 / batch_run_time,
            step=self._global_train_batch,
        )

    # `logs` isn't necessarily always a dict
    if isinstance(logs, dict):
        for name, value in logs.items():
            self.summary.scalar(
                f"batch_{name}", value, step=self._global_train_batch
            )

    if not self._should_trace:
        return

    if self._is_tracing:
        if self._profiler_started and self._batch_trace_context is not None:
            backend.tensorboard.stop_batch_trace(self._batch_trace_context)
            self._batch_trace_context = None
        if self._global_train_batch >= self._stop_batch:
            self._stop_trace()
def on_test_batch_begin(self, batch, logs=None):
    """Advances the global test-batch counter (no tracing for test)."""
    self._global_test_batch += 1
def on_epoch_begin(self, epoch, logs=None):
    """Snapshots optimizer iterations and wall time for steps/sec."""
    # Keeps track of epoch for profiling.
    if self.write_steps_per_second:
        # Snapshot so `_compute_steps_per_second` can take a delta at
        # epoch end.
        self._previous_epoch_iterations = ops.convert_to_tensor(
            self.model.optimizer.iterations, "float32"
        )
        self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    self._log_epoch_metrics(epoch, logs)

    # Histogram/embedding logging is gated by the configured frequency.
    if self.histogram_freq and epoch % self.histogram_freq == 0:
        self._log_weights(epoch)

    if self.embeddings_freq and epoch % self.embeddings_freq == 0:
        self._log_embeddings(epoch)
def _start_trace(self):
    """Starts graph tracing and the profiler for the train log dir."""
    self.summary.trace_on(graph=True, profiler=False)
    self._start_profiler(logdir=self._train_dir)
    self._is_tracing = True
def _stop_trace(self, batch=None):
    """Logs the trace graph to TensorBoard.

    Args:
        batch: Step to export the trace under; defaults to the
            configured `_stop_batch`.
    """
    if batch is None:
        batch = self._stop_batch
    with self._train_writer.as_default():
        # TODO(b/126388999): Remove step info in the summary name.
        self.summary.trace_export(name="batch_%d" % batch, step=batch)
    self._stop_profiler()
    self._is_tracing = False
def _collect_learning_rate(self, logs):
    """Adds the optimizer's current learning rate to `logs` and returns
    it (unchanged when the optimizer is not a Keras `Optimizer`)."""
    optimizer = self.model.optimizer
    if isinstance(optimizer, Optimizer):
        current_lr = ops.convert_to_numpy(optimizer.learning_rate)
        logs["learning_rate"] = float(current_lr)
    return logs
def _compute_steps_per_second(self):
    """Returns train steps per second since the start of this epoch."""
    current_iteration = self.model.optimizer.iterations
    time_since_epoch_begin = time.time() - self._epoch_start_time
    # Cast both operands to float32 tensors so the division is
    # well-defined across backends.
    current_iteration = ops.convert_to_tensor(current_iteration, "float32")
    time_since_epoch_begin = ops.convert_to_tensor(
        time_since_epoch_begin, "float32"
    )
    steps_per_second = (
        current_iteration - self._previous_epoch_iterations
    ) / time_since_epoch_begin
    return float(steps_per_second)
def _log_epoch_metrics(self, epoch, logs):
"""Writes epoch metrics out as scalar summaries.
Args:
epoch: Int. The global step to use for TensorBoard.
logs: Dict. Keys are scalar summary names, values are scalars.
"""
if not logs:
return
train_logs = {k: v for k, v in logs.items() if not k.startswith("val_")}
val_logs = {k: v for k, v in logs.items() if k.startswith("val_")}
train_logs = self._collect_learning_rate(train_logs)
if self.write_steps_per_second:
train_logs["steps_per_second"] = self._compute_steps_per_second()
if train_logs:
with self._train_writer.as_default():
for name, value in train_logs.items():
self.summary.scalar(f"epoch_{name}", value, step=epoch)
if val_logs:
with self._val_writer.as_default():
for name, value in val_logs.items():
name = name[4:] # Remove 'val_' prefix.
self.summary.scalar(f"epoch_{name}", value, step=epoch)
def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with self._train_writer.as_default():
        for layer in self.model.layers:
            for weight in layer.weights:
                # Sanitize the variable name (':' replaced) for use as a
                # summary tag.
                weight_name = weight.name.replace(":", "_")
                # Add a suffix to prevent summary tag name collision.
                histogram_weight_name = f"{weight_name}/histogram"
                self.summary.histogram(
                    histogram_weight_name, weight, step=epoch
                )
                if self.write_images:
                    # Add a suffix to prevent summary tag name
                    # collision.
                    image_weight_name = f"{weight_name}/image"
                    self._log_weight_as_image(
                        weight, image_weight_name, epoch
                    )
        self._train_writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image.

    Args:
        weight: Weight tensor/variable to visualize.
        weight_name: Summary tag to log the image under.
        epoch: Step value for the summary.
    """
    w_img = ops.squeeze(weight)
    shape = w_img.shape
    if len(shape) == 1:  # Bias case
        w_img = ops.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
        if shape[0] > shape[1]:
            # Orient the longer axis horizontally.
            w_img = ops.transpose(w_img)
            shape = w_img.shape
        w_img = ops.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
        if backend.image_data_format() == "channels_last":
            # Switch to channels_first to display every kernel as a separate
            # image.
            w_img = ops.transpose(w_img, [2, 0, 1])
            shape = w_img.shape
        w_img = ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])

    w_img = backend.convert_to_numpy(w_img)
    shape = w_img.shape
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
        self.summary.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(
self._log_write_dir,
"train",
f"keras_embedding.ckpt-{epoch}.weights.h5",
)
self.model.save_weights(embeddings_ckpt)
def _start_profiler(self, logdir):
    """Starts the profiler if currently inactive.

    Args:
        logdir: Directory where profiler results will be saved.
    """
    if self._profiler_started:
        return
    try:
        backend.tensorboard.start_trace(logdir)
        # Only marked started on success so `_stop_profiler` won't try
        # to stop a profiler that never started.
        self._profiler_started = True
    except Exception as e:
        # Profiler errors should not be fatal.
        logging.error("Failed to start profiler: %s", e)
def _stop_profiler(self, save=True):
    """Stops the profiler if currently active.

    Args:
        save: Whether to save the profiler results to TensorBoard.
    """
    if not self._profiler_started:
        return
    try:
        backend.tensorboard.stop_trace(save=save)
    except Exception as e:
        # Profiler errors should not be fatal.
        logging.error("Failed to stop profiler: %s", e)
    finally:
        # Always reset, even if stopping failed.
        self._profiler_started = False
def keras_model_summary(name, data, step=None):
    """Writes a Keras model as JSON to as a Summary.

    Writing the Keras model configuration allows the TensorBoard graph
    plugin to render a conceptual graph, as opposed to graph of ops. In
    case the model fails to serialize as JSON, it ignores and returns
    False.

    Args:
        name: A name for this summary. The summary tag used for
            TensorBoard will be this name prefixed by any active name
            scopes.
        data: A Keras Model to write.
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, this defaults to
            `tf.summary.experimental.get_step()`, which must not be
            `None`.

    Returns:
        True on success, or False if no summary was written because no
        default summary writer was available.

    Raises:
        ValueError: if a default writer exists, but no step was provided
            and `tf.summary.experimental.get_step()` is `None`.
    """
    import tensorflow.summary as tf_summary
    from tensorflow.compat.v1 import SummaryMetadata

    # Serialize first: if the model cannot be converted to JSON there is
    # nothing to write.
    try:
        json_string = data.to_json()
    except Exception as exc:
        # An exception should not break a model code.
        warnings.warn(f"Model failed to serialize as JSON. Ignoring... {exc}")
        return False

    # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode
    # for the rationale.
    metadata = SummaryMetadata()
    metadata.plugin_data.plugin_name = "graph_keras_model"
    # version number = 1
    metadata.plugin_data.content = b"1"

    with tf_summary.experimental.summary_scope(
        name, "graph_keras_model", [data, step]
    ) as (tag, _):
        return tf_summary.write(
            tag=tag, tensor=json_string, step=step, metadata=metadata
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/orbax_checkpoint_test.py | keras/src/callbacks/orbax_checkpoint_test.py | import os
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.callbacks.orbax_checkpoint import OrbaxCheckpoint
from keras.src.utils.module_utils import ocp
# Import advanced Orbax functionality directly from the LazyModule
Checkpointer = ocp.training.Checkpointer
save_pytree = ocp.save_pytree
load_pytree = ocp.load_pytree
preservation_policies = ocp.training.preservation_policies
save_decision_policies = ocp.training.save_decision_policies
class OrbaxCheckpointTest(testing.TestCase):
def _create_test_model(self):
    """Builds and compiles a tiny 10->5->1 MSE regression model."""
    inputs = layers.Input(shape=(10,), name="input_layer")
    hidden = layers.Dense(5, name="dense_layer")(inputs)
    outputs = layers.Dense(1, name="output_layer")(hidden)
    test_model = models.Model(inputs, outputs, name="test_model")
    test_model.compile(optimizer="adam", loss="mse")
    return test_model
def _create_dummy_data(self, num_samples=100):
"""Create dummy training data."""
x = np.random.randn(num_samples, 10)
y = np.random.randn(num_samples, 1)
return x, y
@pytest.mark.requires_trainable_backend
def test_save_freq_batch(self):
    """Test batch-level saving."""
    model = self._create_test_model()
    x, y = self._create_dummy_data(num_samples=50)

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_batch_freq")
    callback = OrbaxCheckpoint(directory=checkpoint_dir, save_freq=10)

    # Train for one epoch with batch saving
    model.fit(x, y, epochs=1, batch_size=5, callbacks=[callback], verbose=0)

    # Wait for async operations to complete before cleanup
    callback.wait_until_finished()

    # Check that checkpoint files were created
    # With 50 samples, batch_size=5, and save_freq=10, there are 10 batches.
    # The callback should save at the end of batch 9 (step 10, since
    # _total_batches_seen is 1-indexed).
    checkpoint_files = os.listdir(checkpoint_dir)
    # Should have at least one checkpoint file
    self.assertGreater(
        len(checkpoint_files),
        0,
        f"Should have checkpoint files, found {checkpoint_files}",
    )
    # Check for the specific step 10 checkpoint (each saved step gets a
    # directory named after its step number).
    step_10_dir = os.path.join(checkpoint_dir, "10")
    self.assertTrue(
        os.path.exists(step_10_dir),
        f"Step 10 checkpoint should exist at {step_10_dir}",
    )
@pytest.mark.requires_trainable_backend
def test_directory_creation(self):
    """Test that checkpoint directory is created if it doesn't exist."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    # Use a nested path that does not exist yet.
    checkpoint_dir = os.path.join(
        self.get_temp_dir(), "test_create_dir", "subdir"
    )
    callback = OrbaxCheckpoint(directory=checkpoint_dir, save_freq="epoch")

    # Directory should be created during training
    model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)

    self.assertTrue(
        os.path.exists(checkpoint_dir),
        "Checkpoint directory should be created",
    )

    # Wait for async operations to complete before test cleanup
    callback.wait_until_finished()
@pytest.mark.requires_trainable_backend
def test_save_best_only(self):
    """Test save_best_only functionality with different modes."""
    model = self._create_test_model()
    x, y = self._create_dummy_data(num_samples=100)

    # Test with mode='min' (save when loss decreases)
    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_save_best_min")
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir,
        monitor="loss",
        save_best_only=True,
        mode="min",
        save_freq="epoch",
    )

    # Train for multiple epochs - should only save when loss improves
    model.fit(x, y, epochs=5, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Check that checkpoint directory exists and has files
    checkpoint_files = os.listdir(checkpoint_dir)
    self.assertGreater(
        len(checkpoint_files), 0, "Should have at least one checkpoint"
    )

    # Test with mode='max' (save when accuracy increases)
    checkpoint_dir_max = os.path.join(
        self.get_temp_dir(), "test_save_best_max"
    )
    # NOTE: monitoring "loss" with mode="max" is deliberate here — it
    # just exercises the mode branch; the metric itself doesn't matter.
    callback_max = OrbaxCheckpoint(
        directory=checkpoint_dir_max,
        monitor="loss",  # Using loss with mode=max
        save_best_only=True,
        mode="max",
        save_freq="epoch",
    )

    model.fit(x, y, epochs=3, callbacks=[callback_max], verbose=0)
    callback_max.wait_until_finished()

    checkpoint_files_max = os.listdir(checkpoint_dir_max)
    self.assertGreater(
        len(checkpoint_files_max), 0, "Should have at least one checkpoint"
    )
@pytest.mark.requires_trainable_backend
def test_save_weights_only(self):
    """Test save_weights_only parameter."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    # Test save_weights_only=True
    checkpoint_dir_weights = os.path.join(
        self.get_temp_dir(), "test_weights_only"
    )
    callback_weights = OrbaxCheckpoint(
        directory=checkpoint_dir_weights,
        save_weights_only=True,
        save_freq="epoch",
    )

    model.fit(x, y, epochs=1, callbacks=[callback_weights], verbose=0)
    callback_weights.wait_until_finished()

    # Check that checkpoint was created
    checkpoint_files = os.listdir(checkpoint_dir_weights)
    self.assertGreater(
        len(checkpoint_files), 0, "Should have checkpoint files"
    )

    # Test save_weights_only=False (default - saves optimizer state)
    checkpoint_dir_full = os.path.join(
        self.get_temp_dir(), "test_full_save"
    )
    callback_full = OrbaxCheckpoint(
        directory=checkpoint_dir_full,
        save_weights_only=False,
        save_freq="epoch",
    )

    model.fit(x, y, epochs=1, callbacks=[callback_full], verbose=0)
    callback_full.wait_until_finished()

    checkpoint_files_full = os.listdir(checkpoint_dir_full)
    self.assertGreater(
        len(checkpoint_files_full), 0, "Should have checkpoint files"
    )
@pytest.mark.requires_trainable_backend
def test_save_freq_epoch(self):
    """Test save_freq='epoch' functionality."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_epoch_freq")
    # Use synchronous saving to avoid async issues with multiple saves
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir,
        save_freq="epoch",
        save_on_background=False,
    )

    # Train for 3 epochs
    model.fit(x, y, epochs=3, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Should have only the latest checkpoint (epoch 2) due to max_to_keep=1
    # (the callback's default).
    checkpoint_files = os.listdir(checkpoint_dir)
    self.assertEqual(
        len(checkpoint_files),
        1,
        f"Should have exactly 1 checkpoint due to max_to_keep=1, "
        f"found {len(checkpoint_files)}",
    )

    # Check for the latest epoch directory (epoch 2)
    epoch_dir = os.path.join(checkpoint_dir, "2")
    self.assertTrue(
        os.path.exists(epoch_dir),
        "Epoch 2 checkpoint should exist (latest due to max_to_keep=1)",
    )
@pytest.mark.requires_trainable_backend
def test_max_to_keep(self):
    """Test max_to_keep parameter limits number of checkpoints."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_max_keep")
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir, save_freq="epoch", max_to_keep=2
    )

    # Train for 5 epochs
    model.fit(x, y, epochs=5, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Should only keep the 2 most recent checkpoints
    checkpoint_files = os.listdir(checkpoint_dir)
    # Orbax may keep more than max_to_keep in some cases, so only assert
    # a loose upper bound here.
    self.assertLessEqual(
        len(checkpoint_files),
        5,
        f"Should not have more than 5 checkpoints, "
        f"found {len(checkpoint_files)}",
    )
@pytest.mark.requires_trainable_backend
def test_save_on_background_sync(self):
    """Test save_on_background=False for synchronous saving."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_sync_save")
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir,
        save_freq="epoch",
        save_on_background=False,  # Synchronous saving
    )

    # Train and ensure it completes (synchronous save should not block)
    model.fit(x, y, epochs=2, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Check that checkpoints were created
    checkpoint_files = os.listdir(checkpoint_dir)
    self.assertGreater(
        len(checkpoint_files), 0, "Should have checkpoint files"
    )
def test_invalid_save_freq(self):
    """Test error handling for invalid save_freq parameter."""
    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_invalid_freq")

    # Constructor must reject save_freq values other than "epoch" or int.
    with self.assertRaises(ValueError):
        OrbaxCheckpoint(directory=checkpoint_dir, save_freq="invalid")
@pytest.mark.requires_trainable_backend
def test_initial_value_threshold(self):
    """Test initial_value_threshold parameter."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_threshold")
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir,
        monitor="loss",
        save_best_only=True,
        mode="min",
        initial_value_threshold=1.0,  # High threshold
        save_freq="epoch",
    )

    # Train - should only save if loss goes below 1.0
    model.fit(x, y, epochs=3, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Check that checkpoint directory exists
    # (may or may not have files depending on loss, since the data is
    # random)
    self.assertTrue(
        os.path.exists(checkpoint_dir), "Checkpoint directory should exist"
    )
@pytest.mark.requires_trainable_backend
def test_checkpoint_loading(self):
    """Test that saved checkpoints can be loaded and weights restored."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(self.get_temp_dir(), "test_loading")
    callback = OrbaxCheckpoint(directory=checkpoint_dir, save_freq="epoch")

    # Train for 1 epoch to save checkpoint
    model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Get original weights after training
    original_weights = model.get_weights()

    # Create a new model with same architecture
    new_model = self._create_test_model()

    # Load the checkpoint
    checkpoint_path = os.path.join(checkpoint_dir, "0")  # epoch 0
    loaded_state = load_pytree(checkpoint_path)

    # Set the state back to the new model
    # The loaded_state has 'trainable_variables' key
    new_model.set_state_tree(
        {"trainable_variables": loaded_state["trainable_variables"]}
    )

    # Compare weights element-wise between trained and restored models.
    loaded_weights = new_model.get_weights()
    for orig, loaded in zip(original_weights, loaded_weights):
        np.testing.assert_array_almost_equal(orig, loaded)
@pytest.mark.requires_trainable_backend
def test_checkpoint_loading_weights_only(self):
    """Test loading checkpoints saved with save_weights_only=True."""
    model = self._create_test_model()
    x, y = self._create_dummy_data()

    checkpoint_dir = os.path.join(
        self.get_temp_dir(), "test_loading_weights"
    )
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir, save_freq="epoch", save_weights_only=True
    )

    # Train for 1 epoch to save checkpoint
    model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Get original weights after training
    original_weights = model.get_weights()

    # Create a new model with same architecture
    new_model = self._create_test_model()

    # Load the checkpoint
    checkpoint_path = os.path.join(checkpoint_dir, "0")  # epoch 0
    loaded_state = load_pytree(checkpoint_path)

    # For save_weights_only, the state should only have trainable_variables
    new_model.set_state_tree(
        {"trainable_variables": loaded_state["trainable_variables"]}
    )

    # Compare weights element-wise between trained and restored models.
    loaded_weights = new_model.get_weights()
    for orig, loaded in zip(original_weights, loaded_weights):
        np.testing.assert_array_almost_equal(orig, loaded)
@pytest.mark.requires_trainable_backend
def test_checkpoint_loading_with_optimizer_state(self):
    """Test loading checkpoints that include optimizer state."""
    model = self._create_test_model()
    x, y = self._create_dummy_data(num_samples=200)
    # More data for optimizer state

    checkpoint_dir = os.path.join(
        self.get_temp_dir(), "test_loading_optimizer"
    )
    callback = OrbaxCheckpoint(
        directory=checkpoint_dir, save_freq="epoch", save_weights_only=False
    )

    # Train for 1 epoch to build optimizer state
    model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
    callback.wait_until_finished()

    # Get original state after training
    original_state_tree = model.get_state_tree()

    # Create a new model with same architecture
    new_model = self._create_test_model()
    # Compile with same optimizer to initialize optimizer variables
    new_model.compile(optimizer="adam", loss="mse")

    # Run one training step to initialize optimizer variables
    new_x, new_y = self._create_dummy_data(num_samples=10)
    new_model.fit(new_x, new_y, epochs=1, batch_size=5, verbose=0)

    # Load the checkpoint (epoch 0)
    checkpoint_path = os.path.join(checkpoint_dir, "0")
    loaded_state = load_pytree(checkpoint_path)

    # Set the full state (weights + optimizer) back to the new model
    new_model.set_state_tree(
        {
            "trainable_variables": loaded_state["trainable_variables"],
            "optimizer_variables": loaded_state["optimizer_variables"],
        }
    )

    # Get the loaded state
    loaded_state_tree = new_model.get_state_tree()

    # Compare trainable variables (weights)
    def compare_nested_dicts(orig_dict, loaded_dict):
        """Recursively compare nested dictionaries containing variables."""
        for key in orig_dict:
            if key not in loaded_dict:
                self.fail(f"Key {key} missing in loaded state")
            orig_val = orig_dict[key]
            loaded_val = loaded_dict[key]
            if isinstance(orig_val, dict):
                compare_nested_dicts(orig_val, loaded_val)
            else:
                # Handle different array types: JAX arrays, TF variables,
                # PyTorch tensors, numpy arrays. Each value is converted
                # to a numpy array before comparison.
                if hasattr(orig_val, "numpy"):
                    # Could be TensorFlow variable or PyTorch tensor
                    try:
                        # Try PyTorch-style conversion first
                        # (detach().cpu().numpy())
                        orig_array = orig_val.detach().cpu().numpy()
                    except AttributeError:
                        # Not PyTorch, try TensorFlow-style conversion
                        orig_array = orig_val.numpy()
                else:
                    # JAX array or numpy array - use directly
                    orig_array = orig_val

                if hasattr(loaded_val, "numpy"):
                    # Could be TensorFlow variable or PyTorch tensor
                    try:
                        # Try PyTorch-style conversion first
                        # (detach().cpu().numpy())
                        loaded_array = loaded_val.detach().cpu().numpy()
                    except AttributeError:
                        # Not PyTorch, try TensorFlow-style conversion
                        loaded_array = loaded_val.numpy()
                else:
                    # JAX array or numpy array - use directly
                    loaded_array = loaded_val

                np.testing.assert_array_almost_equal(
                    orig_array, loaded_array
                )

    compare_nested_dicts(
        original_state_tree["trainable_variables"],
        loaded_state_tree["trainable_variables"],
    )

    # Compare optimizer variables
    compare_nested_dicts(
        original_state_tree["optimizer_variables"],
        loaded_state_tree["optimizer_variables"],
    )
@pytest.mark.requires_trainable_backend
def test_checkpoint_loading_with_metrics_state(self):
"""Test loading checkpoints that include metrics state."""
model = self._create_test_model()
x, y = self._create_dummy_data(num_samples=200)
checkpoint_dir = os.path.join(
self.get_temp_dir(), "test_loading_metrics"
)
callback = OrbaxCheckpoint(
directory=checkpoint_dir, save_freq="epoch", save_weights_only=False
)
# Train for 1 epoch to build metrics state
model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
callback.wait_until_finished()
# Get original state after training
original_state_tree = model.get_state_tree()
# Create a new model with same architecture and compile with metrics
new_model = self._create_test_model()
new_model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# Run one training step to initialize metrics variables
new_x, new_y = self._create_dummy_data(num_samples=10)
new_model.fit(new_x, new_y, epochs=1, batch_size=5, verbose=0)
# Load the checkpoint (epoch 0)
checkpoint_path = os.path.join(checkpoint_dir, "0")
loaded_state = load_pytree(checkpoint_path)
# Set the full state (weights + optimizer + metrics) to new model
new_model.set_state_tree(
{
"trainable_variables": loaded_state["trainable_variables"],
"non_trainable_variables": loaded_state[
"non_trainable_variables"
],
"optimizer_variables": loaded_state["optimizer_variables"],
"metrics_variables": loaded_state["metrics_variables"],
}
)
# Get the loaded state
loaded_state_tree = new_model.get_state_tree()
# Compare trainable variables (weights)
def compare_nested_dicts(orig_dict, loaded_dict):
"""Recursively compare nested dictionaries containing variables."""
for key in orig_dict:
if key not in loaded_dict:
self.fail(f"Key {key} missing in loaded state")
orig_val = orig_dict[key]
loaded_val = loaded_dict[key]
if isinstance(orig_val, dict):
compare_nested_dicts(orig_val, loaded_val)
else:
# Handle different array types: JAX arrays, TF variables,
# PyTorch tensors, numpy arrays
if hasattr(orig_val, "numpy"):
# Could be TensorFlow variable or PyTorch tensor
try:
# Try PyTorch-style conversion first
# (detach().cpu().numpy())
orig_array = orig_val.detach().cpu().numpy()
except AttributeError:
# Not PyTorch, try TensorFlow-style conversion
orig_array = orig_val.numpy()
else:
# JAX array or numpy array - use directly
orig_array = orig_val
if hasattr(loaded_val, "numpy"):
# Could be TensorFlow variable or PyTorch tensor
try:
# Try PyTorch-style conversion first
# (detach().cpu().numpy())
loaded_array = loaded_val.detach().cpu().numpy()
except AttributeError:
# Not PyTorch, try TensorFlow-style conversion
loaded_array = loaded_val.numpy()
else:
# JAX array or numpy array - use directly
loaded_array = loaded_val
np.testing.assert_array_almost_equal(
orig_array, loaded_array
)
compare_nested_dicts(
original_state_tree["trainable_variables"],
loaded_state_tree["trainable_variables"],
)
# Compare non-trainable variables
compare_nested_dicts(
original_state_tree["non_trainable_variables"],
loaded_state_tree["non_trainable_variables"],
)
# Compare optimizer variables
compare_nested_dicts(
original_state_tree["optimizer_variables"],
loaded_state_tree["optimizer_variables"],
)
# Compare metrics variables
compare_nested_dicts(
original_state_tree["metrics_variables"],
loaded_state_tree["metrics_variables"],
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/progbar_logger.py | keras/src/callbacks/progbar_logger.py | from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import io_utils
from keras.src.utils.progbar import Progbar
@keras_export("keras.callbacks.ProgbarLogger")
class ProgbarLogger(Callback):
    """Callback that prints metrics to stdout.

    A step-counting `Progbar` is (re)created at the start of each epoch
    (and for standalone `evaluate()`/`predict()` calls) and updated at
    the end of every batch. When this callback runs inside `fit()`, the
    validation phase produces no output of its own.

    NOTE(review): the constructor takes no arguments; verbosity, the
    number of epochs and the step target are read from `params` in
    `set_params()`. The `count_mode` argument (and the `ValueError` it
    raised) described in older documentation no longer exists.
    """

    def __init__(self):
        super().__init__()
        self.seen = 0  # Steps (batches) seen so far in the current run.
        self.progbar = None  # Lazily built by `_maybe_init_progbar`.
        self.target = None  # Total number of steps, if known up front.
        self.verbose = 1
        self.epochs = 1
        # Set by `on_train_begin`; used to silence the validation pass
        # that `fit()` runs internally.
        self._called_in_fit = False

    def set_params(self, params):
        verbose = params["verbose"]
        if verbose == "auto":
            # "auto" behaves like full per-step logging for this logger.
            verbose = 1
        self.verbose = verbose
        self.epochs = params["epochs"]
        self.target = params["steps"]

    def on_train_begin(self, logs=None):
        # When this logger is called inside `fit`, validation is silent.
        self._called_in_fit = True

    def on_test_begin(self, logs=None):
        if not self._called_in_fit:
            self._reset_progbar()
            self._maybe_init_progbar()

    def on_predict_begin(self, logs=None):
        self._reset_progbar()
        self._maybe_init_progbar()

    def on_epoch_begin(self, epoch, logs=None):
        self._reset_progbar()
        self._maybe_init_progbar()
        if self.verbose and self.epochs > 1:
            io_utils.print_msg(f"Epoch {epoch + 1}/{self.epochs}")

    def on_train_batch_end(self, batch, logs=None):
        self._update_progbar(batch, logs)

    def on_test_batch_end(self, batch, logs=None):
        if not self._called_in_fit:
            self._update_progbar(batch, logs)

    def on_predict_batch_end(self, batch, logs=None):
        # Don't pass prediction results.
        self._update_progbar(batch, None)

    def on_epoch_end(self, epoch, logs=None):
        self._finalize_progbar(logs)

    def on_test_end(self, logs=None):
        if not self._called_in_fit:
            self._finalize_progbar(logs)

    def on_predict_end(self, logs=None):
        self._finalize_progbar(logs)

    def _reset_progbar(self):
        # Drop the old bar; a fresh one is created on the next update.
        self.seen = 0
        self.progbar = None

    def _maybe_init_progbar(self):
        """Create the progress bar if it does not exist yet."""
        if self.progbar is None:
            self.progbar = Progbar(
                target=self.target, verbose=self.verbose, unit_name="step"
            )

    def _update_progbar(self, batch, logs=None):
        """Updates the progbar."""
        logs = logs or {}
        self._maybe_init_progbar()
        self.seen = batch + 1  # One-indexed.
        if self.verbose == 1:
            self.progbar.update(self.seen, list(logs.items()), finalize=False)

    def _finalize_progbar(self, logs):
        """Push the bar to its target and print the final metric values."""
        logs = logs or {}
        if self.target is None:
            # Target was unknown up front; the steps actually seen are it.
            self.target = self.seen
            self.progbar.target = self.target
        self.progbar.update(self.target, list(logs.items()), finalize=True)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/lambda_callback.py | keras/src/callbacks/lambda_callback.py | from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
@keras_export("keras.callbacks.LambdaCallback")
class LambdaCallback(Callback):
    """Callback for creating simple, custom callbacks on-the-fly.

    Build a callback from plain functions that are invoked at the
    matching points of `Model.{fit | evaluate | predict}`.

    The functions are called with positional arguments, as follows:

    - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
      `epoch`, `logs`
    - `on_train_begin` and `on_train_end` expect one positional argument:
      `logs`
    - `on_train_batch_begin` and `on_train_batch_end` expect a positional
      argument `batch` and a keyword argument `logs`
    - See `Callback` class definition for the full list of functions and
      their expected arguments.

    Args:
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.
        on_train_batch_begin: called at the beginning of every train batch.
        on_train_batch_end: called at the end of every train batch.
        kwargs: Any function in `Callback` that you want to override by
            passing `function_name=function`. For example,
            `LambdaCallback(.., on_train_end=train_end_fn)`. The custom
            function needs to have same arguments as the ones defined in
            `Callback`.

    Example:

    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_train_batch_begin=lambda batch,logs: print(batch))

    # Stream the epoch loss to a file in JSON format. The file content
    # is not well-formed JSON but rather has a JSON object per line.
    import json
    json_log = open('loss_log.json', mode='wt', buffering=1)
    json_logging_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: json_log.write(
            json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
        on_train_end=lambda logs: json_log.close()
    )

    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])

    model.fit(...,
              callbacks=[batch_print_callback,
                         json_logging_callback,
                         cleanup_callback])
    ```
    """

    def __init__(
        self,
        on_epoch_begin=None,
        on_epoch_end=None,
        on_train_begin=None,
        on_train_end=None,
        on_train_batch_begin=None,
        on_train_batch_end=None,
        **kwargs,
    ):
        super().__init__()
        # Arbitrary extra overrides go in first, so the named parameters
        # below take precedence over an identically-named kwarg.
        self.__dict__.update(kwargs)
        # Install each explicitly supported hook only when provided, so
        # the base-class no-op implementations remain in place otherwise.
        hook_overrides = {
            "on_epoch_begin": on_epoch_begin,
            "on_epoch_end": on_epoch_end,
            "on_train_begin": on_train_begin,
            "on_train_end": on_train_end,
            "on_train_batch_begin": on_train_batch_begin,
            "on_train_batch_end": on_train_batch_end,
        }
        for hook_name, hook_fn in hook_overrides.items():
            if hook_fn is not None:
                setattr(self, hook_name, hook_fn)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/learning_rate_scheduler.py | keras/src/callbacks/learning_rate_scheduler.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import io_utils
@keras_export("keras.callbacks.LearningRateScheduler")
class LearningRateScheduler(Callback):
    """Learning rate scheduler.

    At the start of each epoch, this callback queries the user-provided
    `schedule` function with the epoch index and the optimizer's current
    learning rate, then writes the returned value back to the optimizer.

    Args:
        schedule: A function that takes an epoch index (integer, indexed
            from 0) and current learning rate (float) as inputs and
            returns a new learning rate as output (float).
        verbose: Integer. 0: quiet, 1: log update messages.

    Example:

    >>> # This function keeps the initial learning rate for the first ten
    >>> # epochs and decreases it exponentially after that.
    >>> def scheduler(epoch, lr):
    ...     if epoch < 10:
    ...         return lr
    ...     else:
    ...         return lr * ops.exp(-0.1)
    >>>
    >>> model = keras.models.Sequential([keras.layers.Dense(10)])
    >>> model.compile(keras.optimizers.SGD(), loss='mse')
    >>> round(model.optimizer.learning_rate, 5)
    0.01
    >>> callback = keras.callbacks.LearningRateScheduler(scheduler)
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=15, callbacks=[callback], verbose=0)
    >>> round(model.optimizer.learning_rate, 5)
    0.00607
    """

    def __init__(self, schedule, verbose=0):
        super().__init__()
        self.schedule = schedule
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        optimizer = self.model.optimizer
        if not hasattr(optimizer, "learning_rate"):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        try:
            # New API: the schedule receives the current learning rate.
            learning_rate = float(
                backend.convert_to_numpy(optimizer.learning_rate)
            )
            learning_rate = self.schedule(epoch, learning_rate)
        except TypeError:
            # Old API (backward compatibility): epoch index only.
            learning_rate = self.schedule(epoch)
        if not isinstance(learning_rate, (float, np.float32, np.float64)):
            raise ValueError(
                "The output of the `schedule` function should be a float. "
                f"Got: {learning_rate}"
            )
        optimizer.learning_rate = learning_rate
        if self.verbose > 0:
            io_utils.print_msg(
                f"\nEpoch {epoch + 1}: LearningRateScheduler setting learning "
                f"rate to {learning_rate}."
            )

    def on_epoch_end(self, epoch, logs=None):
        # Record the (possibly updated) learning rate in the epoch logs.
        logs = logs or {}
        logs["learning_rate"] = float(
            backend.convert_to_numpy(self.model.optimizer.learning_rate)
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/swap_ema_weights.py | keras/src/callbacks/swap_ema_weights.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
@keras_export("keras.callbacks.SwapEMAWeights")
class SwapEMAWeights(Callback):
    """Swaps model weights and EMA weights before and after evaluation.

    This callbacks replaces the model's weight values with the values of
    the optimizer's EMA weights (the exponential moving average of the past
    model weights values, implementing "Polyak averaging") before model
    evaluation, and restores the previous weights after evaluation.

    The `SwapEMAWeights` callback is to be used in conjunction with
    an optimizer that sets `use_ema=True`.

    Note that the weights are swapped in-place in order to save memory.
    The behavior is undefined if you modify the EMA weights
    or model weights in other callbacks.

    Example:

    ```python
    # Remember to set `use_ema=True` in the optimizer
    optimizer = SGD(use_ema=True)
    model.compile(optimizer=optimizer, loss=..., metrics=...)

    # Metrics will be computed with EMA weights
    model.fit(X_train, Y_train, callbacks=[SwapEMAWeights()])

    # If you want to save model checkpoint with EMA weights, you can set
    # `swap_on_epoch=True` and place ModelCheckpoint after SwapEMAWeights.
    model.fit(
        X_train,
        Y_train,
        callbacks=[SwapEMAWeights(swap_on_epoch=True), ModelCheckpoint(...)]
    )
    ```

    Args:
        swap_on_epoch: whether to perform swapping at `on_epoch_begin()`
            and `on_epoch_end()`. This is useful if you want to use
            EMA weights for other callbacks such as `ModelCheckpoint`.
            Defaults to `False`.
    """

    def __init__(self, swap_on_epoch=False):
        super().__init__()
        self.swap_on_epoch = swap_on_epoch
        # True while the model currently holds the EMA weights; False when
        # it holds the regular training weights.
        self._ema_weights_in_model = False

    def _get_optimizer_with_ema(self):
        """Return the optimizer that owns the EMA weights.

        Unwraps a `LossScaleOptimizer` if present and validates that the
        optimizer was built with `use_ema=True`.

        Raises:
            ValueError: if the optimizer has no EMA weights.
        """
        if hasattr(self.model.optimizer, "inner_optimizer"):
            # LossScaleOptimizer
            optimizer = self.model.optimizer.inner_optimizer
        else:
            optimizer = self.model.optimizer
        if not hasattr(optimizer, "_model_variables_moving_average"):
            raise ValueError(
                "SwapEMAWeights must be used when "
                "`use_ema=True` is set on the optimizer. "
                f"Received: use_ema={optimizer.use_ema}"
            )
        return optimizer

    def _tf_swap_variables(self, optimizer):
        """Swap model weights and EMA weights in-place (TF backend).

        Uses three additive updates per variable pair so that no temporary
        variable needs to be created under the distribution strategy.
        """
        for var, average_var in zip(
            self.model.trainable_variables,
            optimizer._model_variables_moving_average,
        ):
            if isinstance(var, backend.Variable):
                var = var.value
            if isinstance(average_var, backend.Variable):
                average_var = average_var.value
            # swap using addition to prevent variable creation
            optimizer._distribution_strategy.extended.update(
                var,
                lambda a, b: a.assign_add(b),
                args=(average_var,),
            )
            optimizer._distribution_strategy.extended.update(
                var,
                lambda a, b: b.assign(a - b),
                args=(average_var,),
            )
            optimizer._distribution_strategy.extended.update(
                var,
                lambda a, b: a.assign(a - b),
                args=(average_var,),
            )

    def _backend_swap_variables(self, optimizer):
        """Swap model weights and EMA weights (non-TF backends)."""
        for var, average_var in zip(
            self.model.trainable_variables,
            optimizer._model_variables_moving_average,
        ):
            temporary_variable = ops.convert_to_numpy(var)
            var.assign(average_var)
            average_var.assign(temporary_variable)

    def _tf_finalize_ema_values(self, optimizer):
        """Copy the model's current weights into the EMA slots (TF)."""
        for var, average_var in zip(
            self.model.trainable_variables,
            optimizer._model_variables_moving_average,
        ):
            if isinstance(var, backend.Variable):
                var = var.value
            if isinstance(average_var, backend.Variable):
                average_var = average_var.value
            optimizer._distribution_strategy.extended.update(
                average_var,
                lambda a, b: a.assign(b),
                args=(var,),
            )

    def _backend_finalize_ema_values(self, optimizer):
        """Copy the model's current weights into the EMA slots (non-TF)."""
        for var, average_var in zip(
            self.model.trainable_variables,
            optimizer._model_variables_moving_average,
        ):
            average_var.assign(var)

    def _swap_variables(self):
        """Swap the model's weights with the optimizer's EMA weights."""
        optimizer = self._get_optimizer_with_ema()
        if backend.backend() == "tensorflow":
            self._tf_swap_variables(optimizer)
        else:
            self._backend_swap_variables(optimizer)

    def _finalize_ema_values(self):
        """Write the model's current weights back into the EMA weights."""
        optimizer = self._get_optimizer_with_ema()
        if backend.backend() == "tensorflow":
            self._tf_finalize_ema_values(optimizer)
        else:
            self._backend_finalize_ema_values(optimizer)

    def on_epoch_begin(self, epoch, logs=None):
        if self.swap_on_epoch and self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = False

    def on_epoch_end(self, epoch, logs=None):
        if self.swap_on_epoch and not self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = True
            # We need to recover EMA weights from the previously swapped
            # weights in the last epoch. This is because, at the end of
            # the fitting, `finalize_variable_values` will be called to
            # assign `_model_variables_moving_average` to
            # `trainable_variables`.
            if epoch == self.params["epochs"] - 1:
                self._finalize_ema_values()

    def on_test_begin(self, logs=None):
        if not self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = True

    def on_test_end(self, logs=None):
        if self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = False

    def on_predict_begin(self, logs=None):
        if not self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = True

    def on_predict_end(self, logs=None):
        # Fix: mirror `on_test_end`. The condition previously read
        # `if not self._ema_weights_in_model`, which never fired after
        # `on_predict_begin` had swapped the EMA weights in — leaving the
        # EMA weights in the model after `predict()` returned.
        if self._ema_weights_in_model:
            self._swap_variables()
            self._ema_weights_in_model = False
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/__init__.py | keras/src/callbacks/__init__.py | from keras.src.callbacks.backup_and_restore import BackupAndRestore
from keras.src.callbacks.callback import Callback
from keras.src.callbacks.callback_list import CallbackList
from keras.src.callbacks.csv_logger import CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping
from keras.src.callbacks.history import History
from keras.src.callbacks.lambda_callback import LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
from keras.src.callbacks.model_checkpoint import ModelCheckpoint
from keras.src.callbacks.monitor_callback import MonitorCallback
from keras.src.callbacks.orbax_checkpoint import OrbaxCheckpoint
from keras.src.callbacks.progbar_logger import ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
from keras.src.callbacks.remote_monitor import RemoteMonitor
from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
from keras.src.callbacks.tensorboard import TensorBoard
from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/csv_logger_test.py | keras/src/callbacks/csv_logger_test.py | import csv
import os
import re
import tempfile
import numpy as np
import pytest
from keras.src import callbacks
from keras.src import initializers
from keras.src import layers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.utils import numerical_utils
# Shared fixture dimensions used by all CSVLogger tests below.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
BATCH_SIZE = 4
class CSVLoggerTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_CSVLogger(self):
        """End-to-end checks of CSVLogger: fresh file with a custom
        separator, append mode, callback-object reuse, and
        `validation_freq > 1` handling."""
        OUTPUT_DIM = 1
        np.random.seed(1337)
        temp_dir = tempfile.TemporaryDirectory()
        filepath = os.path.join(temp_dir.name, "log.tsv")
        sep = "\t"
        x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
        y_train = np.random.random((TRAIN_SAMPLES, OUTPUT_DIM))
        x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
        y_test = np.random.random((TEST_SAMPLES, OUTPUT_DIM))

        def make_model():
            # Re-seed so every case starts from identical weights.
            np.random.seed(1337)
            model = Sequential(
                [
                    layers.Dense(2, activation="relu"),
                    layers.Dense(OUTPUT_DIM),
                ]
            )
            model.compile(
                loss="mse",
                optimizer="sgd",
                metrics=["mse"],
            )
            return model

        # case 1, create new file with defined separator
        model = make_model()
        cbks = [callbacks.CSVLogger(filepath, separator=sep)]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1,
            verbose=0,
        )
        assert os.path.exists(filepath)
        with open(filepath) as csvfile:
            dialect = csv.Sniffer().sniff(csvfile.read())
            assert dialect.delimiter == sep
        del model
        del cbks

        # case 2, append data to existing file, skip header
        model = make_model()
        cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1,
            verbose=0,
        )

        # case 3, reuse of CSVLogger object
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=2,
            verbose=0,
        )

        # 1 header line + 4 epoch lines, each with 5 columns; the header
        # must appear exactly once despite append + reuse.
        with open(filepath) as csvfile:
            list_lines = csvfile.readlines()
            for line in list_lines:
                assert line.count(sep) == 4
            assert len(list_lines) == 5
            output = " ".join(list_lines)
            assert len(re.findall("epoch", output)) == 1
        os.remove(filepath)

        # case 4, Verify Val. loss also registered when Validation Freq > 1
        model = make_model()
        cbks = [callbacks.CSVLogger(filepath, separator=sep)]
        hist = model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            validation_freq=3,
            callbacks=cbks,
            epochs=5,
            verbose=0,
        )
        assert os.path.exists(filepath)
        # Verify that validation loss is registered at val. freq
        # (epoch 3); the other epochs log the "NA" placeholder.
        with open(filepath) as csvfile:
            rows = csv.DictReader(csvfile, delimiter=sep)
            for idx, row in enumerate(rows, 1):
                self.assertIn("val_loss", row)
                if idx == 3:
                    self.assertEqual(
                        row["val_loss"], str(hist.history["val_loss"][0])
                    )
                else:
                    self.assertEqual(row["val_loss"], "NA")

    @pytest.mark.requires_trainable_backend
    def test_stop_training_csv(self):
        # Test that using the CSVLogger callback with the TerminateOnNaN
        # callback does not result in invalid CSVs.
        tmpdir = tempfile.TemporaryDirectory()
        csv_logfile = os.path.join(tmpdir.name, "csv_logger.csv")
        NUM_CLASSES = 2
        np.random.seed(1337)
        x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
        y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
        x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
        y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
        y_test = numerical_utils.to_categorical(y_test)
        y_train = numerical_utils.to_categorical(y_train)
        model = Sequential()
        # Huge constant initializer makes the loss overflow to NaN/inf on
        # the very first epoch, triggering TerminateOnNaN.
        initializer = initializers.Constant(value=1e5)
        for _ in range(5):
            model.add(
                layers.Dense(
                    2,
                    activation="relu",
                    kernel_initializer=initializer,
                )
            )
        model.add(layers.Dense(NUM_CLASSES))
        model.compile(loss="mean_squared_error", optimizer="sgd")
        history = model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=[
                callbacks.TerminateOnNaN(),
                callbacks.CSVLogger(csv_logfile),
            ],
            epochs=20,
        )
        loss = history.history["loss"]
        self.assertEqual(len(loss), 1)
        self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))

        values = []
        with open(csv_logfile) as f:
            # On Windows, due to \r\n line ends, we may end up reading empty
            # lines after each line. Skip empty lines.
            values = [x for x in csv.reader(f) if x]
        self.assertIn("nan", values[-1], "NaN not logged in CSV Logger.")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/orbax_checkpoint.py | keras/src/callbacks/orbax_checkpoint.py | import warnings
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.callbacks.monitor_callback import (
MonitorCallback, # For metric monitoring logic
)
from keras.src.utils.io_utils import print_msg
from keras.src.utils.module_utils import ocp
# Context and AsyncOptions are accessed through the lazy-loaded ocp module
# JAX monitoring compatibility: ensure record_scalar exists
# to prevent AttributeError in older JAX versions
try:
    import jax

    # Older JAX releases lack `jax.monitoring.record_scalar`; install a
    # no-op shim so downstream telemetry calls do not raise
    # AttributeError.
    if not hasattr(jax.monitoring, "record_scalar"):
        jax.monitoring.record_scalar = lambda *args, **kwargs: None
except ImportError:
    # JAX is optional; nothing to patch when it is not installed.
    pass
def _get_state_tree(model):
    """Return the model's complete state as a nested tree structure.

    On the JAX backend the native arrays are returned untouched (cheaper,
    and handled directly downstream). On every other backend the state is
    materialized as numpy arrays and numpy scalars are converted to plain
    Python scalars for Orbax compatibility.
    """
    if backend.backend() == "jax":
        # Preserve native JAX arrays for performance.
        return model.get_state_tree()

    state_tree = model.get_state_tree(value_format="numpy_array")

    def _as_python_scalar(value):
        # 0-dimensional numpy arrays and numpy scalar types (e.g.
        # np.float32) become Python scalars; everything else passes
        # through unchanged.
        if isinstance(value, np.ndarray) and value.ndim == 0:
            return value.item()
        if isinstance(value, np.generic):
            return value.item()
        return value

    return tree.map_structure(_as_python_scalar, state_tree)
@keras_export("keras.callbacks.OrbaxCheckpoint")
class OrbaxCheckpoint(MonitorCallback):
    """Callback to save and load model state using Orbax with a similar API to
    ModelCheckpoint.

    This callback saves the model's weights and optimizer state asynchronously
    using Orbax, allowing training to continue without blocking for I/O.

    Example:

    ```python
    model.compile(loss=..., optimizer=..., metrics=['accuracy'])

    EPOCHS = 10
    checkpoint_dir = '/tmp/ckpt'
    orbax_checkpoint_callback = keras.callbacks.OrbaxCheckpoint(
        directory=checkpoint_dir,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Model is saved at the end of every epoch, if it's the best seen so far.
    model.fit(epochs=EPOCHS, callbacks=[orbax_checkpoint_callback])

    # Alternatively, save checkpoints every N batches -
    orbax_checkpoint_callback = keras.callbacks.OrbaxCheckpoint(
        directory=checkpoint_dir,
        save_freq=100)  # Save every 100 batches

    model.fit(epochs=EPOCHS, callbacks=[orbax_checkpoint_callback])
    ```

    Args:
        directory: path to the directory where to save the checkpoints.
        monitor: The metric name to monitor (e.g., 'val_loss').
        verbose: Verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`, it only saves when the model
            is considered the "best" based on the monitored quantity.
        save_weights_only: if `save_weights_only=True`, only the model's
            weights will be saved. Otherwise, the full model state
            (weights, non-trainable variables, optimizer state, and
            metrics state) will be saved. Defaults to False.
        mode: one of {'auto', 'min', 'max'}. Used with `save_best_only`.
        save_freq: `'epoch'` or integer. Frequency to save checkpoints.
        max_to_keep: Integer, maximum number of recent checkpoints to keep.
            If None, keeps all. Defaults to 1.
        save_on_background: Boolean, whether to save asynchronously in the
            background. Defaults to True.
        initial_value_threshold: Floating point initial "best" value for the
            monitor, used with `save_best_only`.
    """

    def __init__(
        self,
        directory,
        monitor="val_loss",
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode="auto",
        save_freq="epoch",
        initial_value_threshold=None,
        max_to_keep=1,
        save_on_background=True,
    ):
        # Ensure orbax is available
        ocp.initialize()
        # Initialize MonitorCallback for handling 'monitor', 'mode', 'best'
        # logic
        super().__init__(monitor, mode, initial_value_threshold)
        self.directory = directory
        self.verbose = verbose
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.save_freq = save_freq
        self.max_to_keep = max_to_keep
        self.save_on_background = save_on_background
        # Bookkeeping for integer `save_freq` (batch-based saving).
        self._batches_seen_since_last_saving = 0
        self._last_batch_seen = 0
        self._current_epoch = 0  # Keep track of epoch
        self._total_batches_seen = 0  # Global batch counter for step tracking
        if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
            raise ValueError(
                f"Unrecognized save_freq: {self.save_freq}. "
                "Expected save_freq are 'epoch' or integer values"
            )
        # --- Orbax Checkpointer Setup (V1 API) ---
        policies = []
        if max_to_keep is not None:
            policies.append(
                ocp.training.preservation_policies.LatestN(max_to_keep)
            )
        # Use AnyPreservationPolicy to combine them.
        preservation_policy = None
        if policies:
            preservation_policy = (
                ocp.training.preservation_policies.AnyPreservationPolicy(
                    policies
                )
            )
        # Create the V1 Checkpointer with direct parameter passing
        # Orbax will handle directory creation on all processes as needed
        self.checkpointer = ocp.training.Checkpointer(
            directory=directory,
            preservation_policy=preservation_policy,
        )

    def _should_save_on_batch(self, batch):
        """Check if we should save on this batch."""
        if self.save_freq == "epoch":
            return False
        # `batch` resets to 0 at epoch boundaries; detect that to keep the
        # global counters monotonically increasing.
        if batch <= self._last_batch_seen:  # New epoch.
            add_batches = batch + 1
        else:
            add_batches = batch - self._last_batch_seen
        self._batches_seen_since_last_saving += add_batches
        self._last_batch_seen = batch
        self._total_batches_seen += add_batches

        if self._batches_seen_since_last_saving >= self.save_freq:
            self._batches_seen_since_last_saving = 0
            return True
        return False

    def _save_checkpoint(self, step, logs=None):
        """Save a checkpoint at the given step."""
        # --- Prepare Composite State (Backend-Agnostic) ---
        state_tree = _get_state_tree(self.model)

        # Save the nested state structures directly (preserving layer
        # names and structure)
        if self.save_weights_only:
            composite_state = {
                "trainable_variables": state_tree["trainable_variables"],
            }
            if "non_trainable_variables" in state_tree:
                composite_state["non_trainable_variables"] = state_tree[
                    "non_trainable_variables"
                ]
        else:
            composite_state = state_tree

        # --- Save Logic (V1 API) ---
        # All processes participate in distributed checkpointing
        # Checkpointer is configured to save unconditionally when
        # save_pytree is called
        if self.verbose > 0:
            print_msg(
                f"OrbaxCheckpoint: Triggering async save for step {step}..."
            )
        # Use a single with statement. If context_options is empty,
        # Context() uses defaults.
        with ocp.Context():
            if self.save_on_background:
                self.checkpointer.save_pytree_async(step, composite_state)
            else:
                self.checkpointer.save_pytree(step, composite_state)

    def on_train_batch_end(self, batch, logs=None):
        """Save on batch boundaries when `save_freq` is an integer."""
        if self._should_save_on_batch(batch):
            # Handle save_best_only logic for batch-level saving
            should_save = True
            if self.save_best_only:
                current = logs.get(self.monitor) if logs else None
                if current is None:
                    warnings.warn(
                        f"Can save best model only with {self.monitor} "
                        f"available, skipping save at batch {batch}.",
                        stacklevel=2,
                    )
                    should_save = False
                elif not self._is_improvement(current, self.best):
                    should_save = False
                else:
                    # Update best value when there's improvement
                    self.best = current

            if should_save:
                # Use global batch count for Orbax save step
                step = self._total_batches_seen
                self._save_checkpoint(step=step, logs=logs)

    def on_epoch_end(self, epoch, logs=None):
        """Save at the end of the epoch when `save_freq` is "epoch"."""
        self._current_epoch = epoch
        if self.monitor_op is None:
            self._set_monitor_op()  # From MonitorCallback

        # For save_freq="epoch", save at every epoch
        should_save = self.save_freq == "epoch"

        # Handle save_best_only logic
        if should_save and self.save_best_only:
            current = logs.get(self.monitor) if logs else None
            if current is None:
                warnings.warn(
                    f"Can save best model only with {self.monitor} available, "
                    f"skipping save at epoch {epoch}.",
                    stacklevel=2,
                )
                should_save = False
            elif not self._is_improvement(current, self.best):
                should_save = False
            else:
                # Update best value when there's improvement
                self.best = current

        if should_save:
            # Use epoch number as the step for Orbax save
            # Keras has already made the save decision - Checkpointer will
            # save unconditionally
            self._save_checkpoint(step=epoch, logs=logs)

    def on_train_end(self, logs=None):
        """Flush pending async saves when training finishes."""
        # Close the Checkpointer to ensure all pending saves complete
        try:
            self.checkpointer.close()
        except Exception:
            pass  # Ignore errors during cleanup (best-effort shutdown)

    def wait_until_finished(self):
        """Wait for any in-progress checkpoint operations to complete.

        This method blocks until all asynchronous checkpoint save operations
        have completed. It should be called before attempting to load
        checkpoints if there might be pending save operations.
        """
        # Wait for any async operations to complete
        if hasattr(self.checkpointer, "wait"):
            self.checkpointer.wait()
        else:
            # Fallback for older Orbax versions that don't have wait() method
            while self.checkpointer.is_saving_in_progress():
                import time

                time.sleep(0.1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/early_stopping.py | keras/src/callbacks/early_stopping.py | import warnings
from keras.src.api_export import keras_export
from keras.src.callbacks.monitor_callback import MonitorCallback
from keras.src.utils import io_utils
@keras_export("keras.callbacks.EarlyStopping")
class EarlyStopping(MonitorCallback):
    """Stop training when a monitored metric has stopped improving.

    Assuming the goal of a training is to minimize the loss. With this, the
    metric to be monitored would be `'loss'`, and mode would be `'min'`. A
    `model.fit()` training loop will check at end of every epoch whether
    the loss is no longer decreasing, considering the `min_delta` and
    `patience` if applicable. Once it's found no longer decreasing,
    `model.stop_training` is marked True and the training terminates.

    The quantity to be monitored needs to be available in `logs` dict.
    To make it so, pass the loss or metrics at `model.compile()`.

    Args:
        monitor: Quantity to be monitored. Defaults to `"val_loss"`.
        min_delta: Minimum change in the monitored quantity to qualify as an
            improvement, i.e. an absolute change of less than min_delta, will
            count as no improvement. Defaults to `0`.
        patience: Number of epochs with no improvement after which training
            will be stopped. Defaults to `0`.
        verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1 displays
            messages when the callback takes an action. Defaults to `0`.
        mode: One of `{"auto", "min", "max"}`. In `min` mode, training will
            stop when the quantity monitored has stopped decreasing; in
            `"max"` mode it will stop when the quantity monitored has stopped
            increasing; in `"auto"` mode, the direction is automatically
            inferred from the name of the monitored quantity. Defaults to
            `"auto"`.
        baseline: Baseline value for the monitored quantity. If not `None`,
            training will stop if the model doesn't show improvement over the
            baseline. Defaults to `None`.
        restore_best_weights: Whether to restore model weights from the epoch
            with the best value of the monitored quantity. If `False`, the
            model weights obtained at the last step of training are used. An
            epoch will be restored regardless of the performance relative to
            the `baseline`. If no epoch improves on `baseline`, training will
            run for `patience` epochs and restore weights from the best epoch
            in that set. Defaults to `False`.
        start_from_epoch: Number of epochs to wait before starting to monitor
            improvement. This allows for a warm-up period in which no
            improvement is expected and thus training will not be stopped.
            Defaults to `0`.

    Example:

    >>> callback = keras.callbacks.EarlyStopping(monitor='loss',
    ...                                          patience=3)
    >>> # This callback will stop the training when there is no improvement in
    >>> # the loss for three consecutive epochs.
    >>> model = keras.models.Sequential([keras.layers.Dense(10)])
    >>> model.compile(keras.optimizers.SGD(), loss='mse')
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=10, batch_size=1, callbacks=[callback],
    ...                     verbose=0)
    >>> len(history.history['loss'])  # Only 4 epochs are run.
    4
    """

    def __init__(
        self,
        monitor="val_loss",
        min_delta=0,
        patience=0,
        verbose=0,
        mode="auto",
        baseline=None,
        restore_best_weights=False,
        start_from_epoch=0,
    ):
        super().__init__(monitor, mode, min_delta=min_delta)
        self.patience = patience
        self.verbose = verbose
        self.baseline = baseline
        self.wait = 0  # Epochs elapsed since the last qualifying improvement.
        self.stopped_epoch = 0  # Epoch at which training stopped (0 if never).
        self.restore_best_weights = restore_best_weights
        self.best_weights = None  # Snapshot of the best weights, if tracked.
        self.start_from_epoch = start_from_epoch

    def on_train_begin(self, logs=None):
        """Reset all state so the same instance can be re-used across fits."""
        # Allow instances to be re-used
        self.wait = 0
        self.stopped_epoch = 0
        self.best_weights = None
        self.best_epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        """Track the monitored metric and stop training once it stalls."""
        if self.monitor_op is None:
            # Delay setup until the model's metrics are all built
            self._set_monitor_op()
        current = self.get_monitor_value(logs)
        if current is None or epoch < self.start_from_epoch:
            # If no monitor value exists or still in initial warm-up stage.
            return
        if self.restore_best_weights and self.best_weights is None:
            # If best weights were never set,
            # then the current weights are the best.
            self.best_weights = self.model.get_weights()
            self.best_epoch = epoch
        # Incremented unconditionally; reset below only on a qualifying
        # improvement (one that also beats the baseline, if any).
        self.wait += 1
        if self._is_improvement(current, self.best):
            self.best = current
            self.best_epoch = epoch
            if self.restore_best_weights:
                self.best_weights = self.model.get_weights()
            # Only restart wait if we beat both the baseline and our previous
            # best.
            if self.baseline is None or self._is_improvement(
                current, self.baseline
            ):
                self.wait = 0
            return
        if self.wait >= self.patience and epoch > 0:
            # Patience has been exceeded: stop training
            self.stopped_epoch = epoch
            self.model.stop_training = True

    def on_train_end(self, logs=None):
        """Report early stopping and optionally restore the best weights."""
        if self.stopped_epoch > 0 and self.verbose > 0:
            io_utils.print_msg(
                f"Epoch {self.stopped_epoch + 1}: early stopping"
            )
        if self.restore_best_weights and self.best_weights is not None:
            if self.verbose > 0:
                io_utils.print_msg(
                    "Restoring model weights from "
                    "the end of the best epoch: "
                    f"{self.best_epoch + 1}."
                )
            self.model.set_weights(self.best_weights)

    def get_monitor_value(self, logs):
        """Return the monitored metric from `logs`, warning when missing."""
        logs = logs or {}
        monitor_value = logs.get(self.monitor)
        if monitor_value is None:
            warnings.warn(
                (
                    f"Early stopping conditioned on metric `{self.monitor}` "
                    "which is not available. "
                    f"Available metrics are: {','.join(list(logs.keys()))}"
                ),
                stacklevel=2,
            )
        return monitor_value
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/lambda_callback_test.py | keras/src/callbacks/lambda_callback_test.py | import numpy as np
import pytest
from absl import logging
from keras.src import callbacks
from keras.src import layers
from keras.src import losses
from keras.src import optimizers
from keras.src import testing
from keras.src.models.sequential import Sequential
class LambdaCallbackTest(testing.TestCase):
    def _compiled_model(self, batch_size):
        """Return a tiny single-Dense model compiled with SGD + MSE."""
        model = Sequential(
            [layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
        )
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        return model

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback(self):
        """Test standard LambdaCallback functionalities with training."""
        batch_size = 4
        model = self._compiled_model(batch_size)
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        cb = callbacks.LambdaCallback(
            on_train_begin=lambda logs: logging.warning("on_train_begin"),
            on_epoch_begin=lambda epoch, logs: logging.warning(
                "on_epoch_begin"
            ),
            on_epoch_end=lambda epoch, logs: logging.warning("on_epoch_end"),
            on_train_end=lambda logs: logging.warning("on_train_end"),
        )
        with self.assertLogs(level="WARNING") as logs:
            model.fit(
                x,
                y,
                batch_size=batch_size,
                validation_split=0.2,
                callbacks=[cb],
                epochs=5,
                verbose=0,
            )
        for expected in (
            "on_train_begin",
            "on_epoch_begin",
            "on_epoch_end",
            "on_train_end",
        ):
            self.assertTrue(any(expected in log for log in logs.output))

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_batches(self):
        """Test LambdaCallback's behavior with batch-level callbacks."""
        batch_size = 4
        model = self._compiled_model(batch_size)
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        cb = callbacks.LambdaCallback(
            on_train_batch_begin=lambda batch, logs: logging.warning(
                "on_train_batch_begin"
            ),
            on_train_batch_end=lambda batch, logs: logging.warning(
                "on_train_batch_end"
            ),
        )
        with self.assertLogs(level="WARNING") as logs:
            model.fit(
                x,
                y,
                batch_size=batch_size,
                validation_split=0.2,
                callbacks=[cb],
                epochs=5,
                verbose=0,
            )
        for expected in ("on_train_batch_begin", "on_train_batch_end"):
            self.assertTrue(any(expected in log for log in logs.output))

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_kwargs(self):
        """Test LambdaCallback's behavior with custom defined callback."""
        batch_size = 4
        model = self._compiled_model(batch_size)
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        # Train briefly for evaluation to work.
        model.fit(x, y, batch_size=batch_size, epochs=1, verbose=0)

        def custom_on_test_begin(logs):
            logging.warning("custom_on_test_begin_executed")

        cb = callbacks.LambdaCallback(on_test_begin=custom_on_test_begin)
        with self.assertLogs(level="WARNING") as logs:
            model.evaluate(
                x,
                y,
                batch_size=batch_size,
                callbacks=[cb],
                verbose=0,
            )
        self.assertTrue(
            any(
                "custom_on_test_begin_executed" in log
                for log in logs.output
            )
        )

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_no_args(self):
        """Test initializing LambdaCallback without any arguments."""
        self.assertIsInstance(
            callbacks.LambdaCallback(), callbacks.LambdaCallback
        )

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_additional_kwargs(self):
        """Test initializing LambdaCallback with non-predefined kwargs."""

        def custom_callback(logs):
            pass

        cb = callbacks.LambdaCallback(custom_method=custom_callback)
        self.assertTrue(hasattr(cb, "custom_method"))

    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_during_prediction(self):
        """Test LambdaCallback's functionality during model prediction."""
        batch_size = 4
        model = self._compiled_model(batch_size)
        x = np.random.randn(16, 2)

        def custom_on_predict_begin(logs):
            logging.warning("on_predict_begin_executed")

        cb = callbacks.LambdaCallback(
            on_predict_begin=custom_on_predict_begin
        )
        with self.assertLogs(level="WARNING") as logs:
            model.predict(
                x, batch_size=batch_size, callbacks=[cb], verbose=0
            )
        self.assertTrue(
            any("on_predict_begin_executed" in log for log in logs.output)
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/reduce_lr_on_plateau.py | keras/src/callbacks/reduce_lr_on_plateau.py | import warnings
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.callbacks.monitor_callback import MonitorCallback
from keras.src.utils import io_utils
@keras_export("keras.callbacks.ReduceLROnPlateau")
class ReduceLROnPlateau(MonitorCallback):
    """Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This callback monitors a
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Example:

    ```python
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5, min_lr=0.001)
    model.fit(x_train, y_train, callbacks=[reduce_lr])
    ```

    Args:
        monitor: String. Quantity to be monitored.
        factor: Float. Factor by which the learning rate will be reduced.
            `new_lr = lr * factor`.
        patience: Integer. Number of epochs with no improvement after which
            learning rate will be reduced.
        verbose: Integer. 0: quiet, 1: update messages.
        mode: String. One of `{'auto', 'min', 'max'}`. In `'min'` mode,
            the learning rate will be reduced when the
            quantity monitored has stopped decreasing; in `'max'` mode it will
            be reduced when the quantity monitored has stopped increasing; in
            `'auto'` mode, the direction is automatically inferred from the
            name of the monitored quantity.
        min_delta: Float. Threshold for measuring the new optimum, to only
            focus on significant changes.
        cooldown: Integer. Number of epochs to wait before resuming normal
            operation after the learning rate has been reduced.
        min_lr: Float. Lower bound on the learning rate.
    """

    def __init__(
        self,
        monitor="val_loss",
        factor=0.1,
        patience=10,
        verbose=0,
        mode="auto",
        min_delta=1e-4,
        cooldown=0,
        min_lr=0.0,
        **kwargs,
    ):
        super().__init__(monitor, mode, min_delta=min_delta)
        if factor >= 1.0:
            # A factor >= 1 would never actually reduce the learning rate.
            raise ValueError(
                "ReduceLROnPlateau does not support a factor >= 1.0. "
                f"Received factor={factor}"
            )
        self.factor = factor
        self.min_lr = min_lr
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.wait = 0  # Epochs elapsed since the last improvement.

    def _reset(self):
        """Resets wait counter and cooldown counter."""
        self.cooldown_counter = 0
        self.wait = 0

    def on_train_begin(self, logs=None):
        """Reset plateau-tracking state so the instance can be re-used."""
        self._reset()

    def on_epoch_end(self, epoch, logs=None):
        """Track the monitored metric and reduce the LR after a plateau."""
        if self.monitor_op is None:
            # Delay setup until the model's metrics are all built
            self._set_monitor_op()
        logs = logs or {}
        # Expose the current learning rate in `logs` so other callbacks
        # (e.g. History, CSVLogger) can see it.
        logs["learning_rate"] = float(
            backend.convert_to_numpy(self.model.optimizer.learning_rate)
        )
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn(
                "Learning rate reduction is conditioned on metric "
                f"`{self.monitor}` which is not available. Available metrics "
                f"are: {','.join(list(logs.keys()))}.",
                stacklevel=2,
            )
        else:
            if self.in_cooldown():
                # While cooling down, count down and keep `wait` frozen.
                self.cooldown_counter -= 1
                self.wait = 0
            if self._is_improvement(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                self.wait += 1
                if self.wait >= self.patience:
                    old_lr = float(
                        backend.convert_to_numpy(
                            self.model.optimizer.learning_rate
                        )
                    )
                    if old_lr > np.float32(self.min_lr):
                        # Reduce, but never below the configured floor.
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        self.model.optimizer.learning_rate = new_lr
                        if self.verbose > 0:
                            io_utils.print_msg(
                                f"\nEpoch {epoch + 1}: "
                                "ReduceLROnPlateau reducing "
                                f"learning rate to {new_lr}."
                            )
                        self.cooldown_counter = self.cooldown
                        self.wait = 0

    def in_cooldown(self):
        """Return True while the post-reduction cooldown is still active."""
        return self.cooldown_counter > 0
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/remote_monitor.py | keras/src/callbacks/remote_monitor.py | import json
import warnings
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
try:
import requests
except ImportError:
requests = None
@keras_export("keras.callbacks.RemoteMonitor")
class RemoteMonitor(Callback):
    """Callback used to stream events to a server.

    Requires the `requests` library.
    Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
    HTTP POST, with a `data` argument which is a
    JSON-encoded dictionary of event data.

    If `send_as_json=True`, the content type of the request will be
    `"application/json"`.
    Otherwise the serialized JSON will be sent within a form.

    Args:
        root: String; root url of the target server.
        path: String; path relative to `root` to which the events will be
            sent.
        field: String; JSON field under which the data will be stored.
            The field is used only if the payload is sent within a form
            (i.e. when `send_as_json=False`).
        headers: Dictionary; optional custom HTTP headers.
        send_as_json: Boolean; whether the request should be
            sent as `"application/json"`.
    """

    def __init__(
        self,
        root="http://localhost:9000",
        path="/publish/epoch/end/",
        field="data",
        headers=None,
        send_as_json=False,
    ):
        super().__init__()
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers
        self.send_as_json = send_as_json

    def on_epoch_end(self, epoch, logs=None):
        """POST the epoch number and `logs` contents to the server."""
        if requests is None:
            raise ImportError("RemoteMonitor requires the `requests` library.")
        payload = {"epoch": epoch}
        for key, value in (logs or {}).items():
            # numpy arrays and scalars are not JSON-serializable, so
            # unwrap them into native Python scalar values first.
            payload[key] = (
                value.item()
                if isinstance(value, (np.ndarray, np.generic))
                else value
            )
        url = self.root + self.path
        try:
            if self.send_as_json:
                requests.post(url, json=payload, headers=self.headers)
            else:
                requests.post(
                    url,
                    {self.field: json.dumps(payload)},
                    headers=self.headers,
                )
        except requests.exceptions.RequestException:
            warnings.warn(
                f"Could not reach RemoteMonitor root server at {self.root}",
                stacklevel=2,
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/csv_logger.py | keras/src/callbacks/csv_logger.py | import collections
import csv
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
    """Callback that streams epoch results to a CSV file.

    Supports all values that can be represented as a string,
    including 1D iterables such as `np.ndarray`.

    Args:
        filename: Filename of the CSV file, e.g. `'run/log.csv'`.
        separator: String used to separate elements in the CSV file.
        append: Boolean. True: append if file exists (useful for continuing
            training). False: overwrite existing file.

    Example:

    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```
    """

    def __init__(self, filename, separator=",", append=False):
        super().__init__()
        self.sep = separator
        self.filename = file_utils.path_to_string(filename)
        self.append = append
        self.writer = None  # csv.DictWriter, created lazily on first epoch.
        self.keys = None  # Sorted metric names, fixed at the first epoch.
        self.append_header = True  # Whether a header row still needs writing.
        self.csv_file = None  # Open file handle while training is running.

    def on_train_begin(self, logs=None):
        """Open the log file and reset writer state for a new `fit()` run."""
        if self.append:
            if file_utils.exists(self.filename):
                with file_utils.File(self.filename, "r") as f:
                    # Only write a header if the existing file is empty.
                    self.append_header = not bool(len(f.readline()))
            mode = "a"
        else:
            mode = "w"
        # ensure csv_file is None or closed before reassigning
        if self.csv_file and not self.csv_file.closed:
            self.csv_file.close()
        self.csv_file = file_utils.File(self.filename, mode)
        # Reset writer and keys
        self.writer = None
        self.keys = None

    def on_epoch_end(self, epoch, logs=None):
        """Append one CSV row with the epoch index and all `logs` values."""
        logs = logs or {}

        def handle_value(k):
            # Render non-string iterables as a quoted "[a, b, ...]" cell;
            # strings and scalars (incl. 0-d arrays) pass through unchanged.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, str):
                return k
            elif (
                isinstance(k, collections.abc.Iterable)
                and not is_zero_dim_ndarray
            ):
                return f'"[{", ".join(map(str, k))}]"'
            else:
                return k

        if self.keys is None:
            self.keys = sorted(logs.keys())
            # If no val_* keys are present yet, reserve matching val_*
            # columns anyway — presumably so the header stays stable when
            # validation metrics only appear in later epochs (such rows
            # fill the missing cells with "NA" below).
            val_keys_found = False
            for key in self.keys:
                if key.startswith("val_"):
                    val_keys_found = True
                    break
            if not val_keys_found and self.keys:
                self.keys.extend([f"val_{k}" for k in self.keys])
        if not self.writer:

            class CustomDialect(csv.excel):
                delimiter = self.sep

            fieldnames = ["epoch"] + (self.keys or [])
            self.writer = csv.DictWriter(
                self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
            )
            if self.append_header:
                self.writer.writeheader()
        row_dict = collections.OrderedDict({"epoch": epoch})
        row_dict.update(
            (key, handle_value(logs.get(key, "NA"))) for key in self.keys
        )
        self.writer.writerow(row_dict)
        # Flush so partial logs survive a crash mid-training.
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        """Close the log file and drop the writer."""
        if self.csv_file and not self.csv_file.closed:
            self.csv_file.close()
        self.writer = None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/model_checkpoint_test.py | keras/src/callbacks/model_checkpoint_test.py | import os
import warnings
import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.models import Sequential
from keras.src.testing import test_utils
from keras.src.utils import numerical_utils
try:
import h5py
except ImportError:
h5py = None
# Dimensions of the synthetic classification problem used by these tests.
TRAIN_SAMPLES = 30  # Number of training examples.
TEST_SAMPLES = 30  # Number of test examples.
NUM_CLASSES = 3  # Output classes.
INPUT_DIM = 3  # Feature dimensionality of each sample.
NUM_HIDDEN = 5  # Width of the hidden Dense layer.
BATCH_SIZE = 5  # Default batch size for `fit()` calls.
class ModelCheckpointTest(testing.TestCase):
@pytest.mark.skipif(
h5py is None,
reason="`h5py` is a required dependency for `ModelCheckpoint` tests.",
)
@pytest.mark.skipif(
testing.jax_uses_gpu(),
reason="Mysterious core dump on CI after upgrading JAX",
)
@pytest.mark.requires_trainable_backend
def test_model_checkpoint_options(self):
def get_model():
model = Sequential(
[
layers.Dense(NUM_HIDDEN, activation="relu"),
layers.Dense(NUM_CLASSES, activation="softmax"),
]
)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=[metrics.Accuracy("acc")],
)
return model
model = get_model()
temp_dir = self.get_temp_dir()
# Save model to a subdir inside the temp_dir so we can test
# automatic directory creation.
filepath = os.path.join(temp_dir, "subdir", "checkpoint.keras")
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
random_seed=42,
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = numerical_utils.to_categorical(y_test, num_classes=NUM_CLASSES)
y_train = numerical_utils.to_categorical(
y_train, num_classes=NUM_CLASSES
)
# Case 1
monitor = "val_loss"
save_best_only = False
mode = "auto"
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 2
mode = "min"
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 3
mode = "max"
monitor = "val_acc"
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 4
save_best_only = True
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 5: metric not available.
cbks = [
callbacks.ModelCheckpoint(
filepath, monitor="unknown", save_best_only=True, mode="min"
)
]
with pytest.warns(UserWarning):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
# Case 6
with warnings.catch_warnings(record=True) as warning_logs:
warnings.simplefilter("always")
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode="unknown",
)
self.assertIn(
"ModelCheckpoint mode 'unknown' is unknown",
str(warning_logs[-1].message),
)
# Case 8a: `ModelCheckpoint` with an integer `save_freq`
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, "checkpoint.epoch{epoch:02d}.keras")
save_best_only = False
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
)
]
self.assertFalse(os.path.exists(filepath.format(epoch=3)))
model.fit(
x_train,
y_train,
batch_size=6, # 5 batches / epoch, so should backup every 3 epochs
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=0,
)
self.assertFalse(os.path.exists(filepath.format(epoch=1)))
self.assertFalse(os.path.exists(filepath.format(epoch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=3)))
self.assertFalse(os.path.exists(filepath.format(epoch=4)))
self.assertFalse(os.path.exists(filepath.format(epoch=5)))
self.assertTrue(os.path.exists(filepath.format(epoch=6)))
self.assertFalse(os.path.exists(filepath.format(epoch=7)))
self.assertFalse(os.path.exists(filepath.format(epoch=8)))
self.assertTrue(os.path.exists(filepath.format(epoch=9)))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8b: `ModelCheckpoint` with int `save_freq` & `save_weights_only`
temp_dir = self.get_temp_dir()
filepath = os.path.join(
temp_dir, "checkpoint.epoch{epoch:02d}.weights.h5"
)
cbks = [
callbacks.ModelCheckpoint(
filepath, monitor=monitor, save_freq=15, save_weights_only=True
)
]
self.assertFalse(os.path.exists(filepath.format(epoch=3)))
model.fit(
x_train,
y_train,
batch_size=6,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=0,
)
self.assertFalse(os.path.exists(filepath.format(epoch=1)))
self.assertFalse(os.path.exists(filepath.format(epoch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=3)))
self.assertFalse(os.path.exists(filepath.format(epoch=4)))
self.assertFalse(os.path.exists(filepath.format(epoch=5)))
self.assertTrue(os.path.exists(filepath.format(epoch=6)))
self.assertFalse(os.path.exists(filepath.format(epoch=7)))
self.assertFalse(os.path.exists(filepath.format(epoch=8)))
self.assertTrue(os.path.exists(filepath.format(epoch=9)))
# Case 9: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, "Unrecognized save_freq"):
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
save_freq="invalid_save_freq",
)
# The following should not raise ValueError.
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
save_freq="epoch",
)
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
save_freq=3,
)
# Case 10a: `ModelCheckpoint` save with batch in filename.
temp_dir = self.get_temp_dir()
filepath = os.path.join(
temp_dir, "checkpoint.epoch{epoch:02d}batch{batch:02d}.keras"
)
cbks = [
callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1)
]
model.fit(
x_train,
y_train,
batch_size=15,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=1,
)
self.assertTrue(os.path.exists(filepath.format(epoch=1, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=1, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=2, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=2, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=3, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=3, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=4, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=4, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=5, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=5, batch=2)))
# Case 10b: `ModelCheckpoint` save weights with batch in filename.
temp_dir = self.get_temp_dir()
filepath = os.path.join(
temp_dir, "checkpoint.epoch{epoch:02d}batch{batch:02d}.weights.h5"
)
cbks = [
callbacks.ModelCheckpoint(
filepath, monitor=monitor, save_freq=1, save_weights_only=True
)
]
model.fit(
x_train,
y_train,
batch_size=15,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=1,
)
self.assertTrue(os.path.exists(filepath.format(epoch=1, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=1, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=2, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=2, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=3, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=3, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=4, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=4, batch=2)))
self.assertTrue(os.path.exists(filepath.format(epoch=5, batch=1)))
self.assertTrue(os.path.exists(filepath.format(epoch=5, batch=2)))
# Case 11: ModelCheckpoint saves model with initial_value_threshold
# param
mode = "max"
monitor = "val_acc"
initial_value_threshold = -0.01
save_best_only = True
filepath = os.path.join(temp_dir, "checkpoint.keras")
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 12: ModelCheckpoint saves model with initial_value_threshold
# param
mode = "auto"
monitor = "val_loss"
initial_value_threshold = None
save_best_only = True
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertTrue(os.path.exists(filepath))
os.remove(filepath)
# Case 13: ModelCheckpoint doesn't save model if loss was minimum
# earlier
mode = "min"
monitor = "val_loss"
initial_value_threshold = 0
save_best_only = True
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertFalse(os.path.exists(filepath))
# Case 14: ModelCheckpoint doesn't save model if loss was min earlier in
# auto mode
mode = "auto"
monitor = "val_loss"
initial_value_threshold = 0
save_best_only = True
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode,
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertFalse(os.path.exists(filepath))
# Case 15: ModelCheckpoint doesn't save model if auc was max earlier in
# auto mode
mode = "auto"
monitor = "val_auc"
initial_value_threshold = 1
save_best_only = True
cbks = [
callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode,
)
]
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=[metrics.AUC()],
)
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
self.assertFalse(os.path.exists(filepath))
    @pytest.mark.skipif(
        h5py is None,
        reason="`h5py` is a required dependency for `ModelCheckpoint` tests.",
    )
    @pytest.mark.requires_trainable_backend
    def test_model_checkpoint_loading(self):
        """Checkpoints written by `ModelCheckpoint` can be loaded back.

        Covers both the whole-model `.keras` format (restored with
        `saving.load_model`) and the weights-only `.weights.h5` format
        (restored with `load_weights`), checking that the restored weights
        match the trained model's weights.
        """

        def get_model():
            # Small functional model; the fixed batch_size keeps shapes
            # static across save/load.
            inputs = layers.Input(shape=(INPUT_DIM,), batch_size=5)
            x = layers.Dense(NUM_HIDDEN, activation="relu")(inputs)
            outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
            functional_model = models.Model(inputs, outputs)
            functional_model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=[metrics.Accuracy("acc")],
            )
            return functional_model

        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            random_seed=42,
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM,),
            num_classes=NUM_CLASSES,
        )
        y_test = numerical_utils.to_categorical(y_test, num_classes=NUM_CLASSES)
        y_train = numerical_utils.to_categorical(
            y_train, num_classes=NUM_CLASSES
        )

        # Model Checkpoint load model (default)
        model = get_model()
        temp_dir = self.get_temp_dir()
        filepath = os.path.join(temp_dir, "checkpoint.model.keras")
        mode = "auto"
        monitor = "val_loss"
        save_best_only = True
        cbks = [
            callbacks.ModelCheckpoint(
                filepath,
                monitor=monitor,
                save_best_only=save_best_only,
                mode=mode,
            )
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1,
            verbose=0,
        )
        ref_weights = model.get_weights()
        self.assertTrue(os.path.exists(filepath))
        # Reload the full model and compare weights tensor-by-tensor.
        new_model = saving.load_model(filepath)
        new_weights = new_model.get_weights()
        self.assertEqual(len(ref_weights), len(new_weights))
        for ref_w, w in zip(ref_weights, new_weights):
            self.assertAllClose(ref_w, w)

        # Model Checkpoint load model weights
        model = get_model()
        temp_dir = self.get_temp_dir()
        filepath = os.path.join(temp_dir, "checkpoint.weights.h5")
        mode = "auto"
        monitor = "val_loss"
        save_best_only = True
        cbks = [
            callbacks.ModelCheckpoint(
                filepath,
                monitor=monitor,
                save_best_only=save_best_only,
                save_weights_only=True,
                mode=mode,
            )
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1,
            verbose=0,
        )
        ref_weights = model.get_weights()
        self.assertTrue(os.path.exists(filepath))
        # Weights-only checkpoint: build a fresh model, then load weights
        # into it.
        new_model = get_model()
        new_model.load_weights(filepath)
        new_weights = new_model.get_weights()
        self.assertEqual(len(ref_weights), len(new_weights))
        for ref_w, w in zip(ref_weights, new_weights):
            self.assertAllClose(ref_w, w)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/learning_rate_scheduler_test.py | keras/src/callbacks/learning_rate_scheduler_test.py | import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import optimizers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.testing import test_utils
from keras.src.utils import io_utils
from keras.src.utils import numerical_utils
class LearningRateSchedulerTest(testing.TestCase):
    """Tests for the `LearningRateScheduler` callback."""

    def setUp(self):
        # Build a tiny compiled model and a small dataset shared by all
        # tests in this class.
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=10,
            test_samples=10,
            input_shape=(3,),
            num_classes=2,
        )
        y_train = numerical_utils.to_categorical(y_train)

        model = Sequential([layers.Dense(5), layers.Dense(2)])

        model.compile(
            loss="mse",
            optimizer="sgd",
        )

        self.model = model
        self.x_train = x_train
        self.y_train = y_train

    @pytest.mark.requires_trainable_backend
    def test_updates_learning_rate(self):
        """The schedule's value for epoch 0 is applied to the optimizer."""
        # Schedule yields 1 / (2 + epoch); after one epoch (epoch index 0)
        # the optimizer learning rate must be 0.5.
        lr_scheduler = callbacks.LearningRateScheduler(
            lambda step: 1.0 / (2.0 + step), verbose=1
        )

        self.model.fit(
            self.x_train,
            self.y_train,
            callbacks=[lr_scheduler],
            epochs=1,
        )

        self.assertEqual(self.model.optimizer.learning_rate.value, 0.5)

    @pytest.mark.requires_trainable_backend
    def test_verbose_logging(self):
        """With verbose=1, the callback logs the learning rate it sets."""
        lr_scheduler = callbacks.LearningRateScheduler(
            lambda step: 1.0 / (1.0 + step), verbose=1
        )
        io_utils.disable_interactive_logging()
        io_utils.set_logging_verbosity("INFO")

        with self.assertLogs() as logs:
            self.model.fit(
                self.x_train,
                self.y_train,
                callbacks=[lr_scheduler],
                epochs=1,
            )
            expected_log = "LearningRateScheduler setting learning rate to 1.0"
            self.assertTrue(any(expected_log in log for log in logs.output))

    @pytest.mark.requires_trainable_backend
    def test_schedule_dependent_on_previous_learning_rate(self):
        """The two-argument schedule form receives the current LR."""
        # Halving per epoch for 2 epochs: initial_lr / 4 at the end.
        lr_scheduler = callbacks.LearningRateScheduler(lambda step, lr: lr / 2)

        initial_lr = 0.03
        self.model.compile(
            loss="mse",
            optimizer=optimizers.Adam(initial_lr),
        )

        self.model.fit(
            self.x_train,
            self.y_train,
            callbacks=[lr_scheduler],
            epochs=2,
        )
        self.assertEqual(
            self.model.optimizer.learning_rate.value, initial_lr / 4.0
        )

    @pytest.mark.requires_trainable_backend
    def test_throws_when_optimizer_has_schedule(self):
        """Combining the callback with a built-in LR schedule must fail."""
        lr_scheduler = callbacks.LearningRateScheduler(lambda step, lr: lr / 2)

        self.model.compile(
            loss="mse",
            optimizer=optimizers.Adam(
                optimizers.schedules.PolynomialDecay(
                    initial_learning_rate=0.1, decay_steps=10
                )
            ),
        )

        with self.assertRaisesRegex(
            TypeError,
            "This optimizer was created with a `LearningRateSchedule`",
        ):
            self.model.fit(
                self.x_train,
                self.y_train,
                callbacks=[lr_scheduler],
                epochs=2,
            )

    @pytest.mark.requires_trainable_backend
    def test_learning_rate_in_history(self):
        """The applied learning rate is recorded as a float in `history`."""
        lr_scheduler = callbacks.LearningRateScheduler(lambda step, lr: 0.5)

        history = self.model.fit(
            self.x_train,
            self.y_train,
            callbacks=[lr_scheduler],
            epochs=1,
        )

        self.assertTrue("learning_rate" in history.history)
        self.assertEqual(type(history.history["learning_rate"][0]), float)
        self.assertEqual(history.history["learning_rate"][0], 0.5)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/callbacks/callback_test.py | keras/src/callbacks/callback_test.py | import numpy as np
import pytest
from keras.src import models
from keras.src import testing
from keras.src.callbacks.callback import Callback
class CallbackTest(testing.TestCase):
    """Tests for the base `Callback` class."""

    @pytest.mark.requires_trainable_backend
    def test_model_state_is_current_on_epoch_end(self):
        """Callback hooks must observe up-to-date model variable state."""

        class CountingModel(models.Model):
            # Identity model that counts forward passes in a non-trainable
            # scalar variable.
            def __init__(self):
                super().__init__()
                self.iterations = self.add_variable(
                    shape=(), initializer="zeros", trainable=False
                )

            def call(self, inputs):
                # Bump the counter once per call.
                self.iterations.assign(self.iterations + 1)
                return inputs

        class StateCheckingCallback(Callback):
            def on_batch_end(self, batch, logs):
                # After batch index `b`, the model must have executed
                # exactly b + 1 steps.
                assert np.int32(self.model.iterations) == batch + 1

        model = CountingModel()
        model.compile(optimizer="sgd", loss="mse")
        features = np.random.random((8, 1))
        targets = np.random.random((8, 1))
        model.fit(
            features,
            targets,
            callbacks=[StateCheckingCallback()],
            batch_size=2,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/config.py | keras/src/backend/config.py | import json
import os
from keras.src.api_export import keras_export
# The type of float to use throughout a session.
_FLOATX = "float32"
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = "channels_last"
# Default backend: TensorFlow.
_BACKEND = "tensorflow"
# Whether NNX is enabled.
_NNX_ENABLED = False
# Cap run duration for debugging.
_MAX_EPOCHS = None
_MAX_STEPS_PER_EPOCH = None
@keras_export(["keras.config.floatx", "keras.backend.floatx"])
def floatx():
    """Return the name of the default float dtype.

    Returns:
        String: one of `'bfloat16'`, `'float16'`, `'float32'`, `'float64'`.

    Example:

    >>> keras.config.floatx()
    'float32'
    """
    return _FLOATX
@keras_export(["keras.config.set_floatx", "keras.backend.set_floatx"])
def set_floatx(value):
    """Set the default float dtype.

    Note: It is not recommended to set this to `"float16"` for training,
    as this will likely cause numeric stability issues. Prefer mixed
    precision instead, which leverages a mix of `float16` and `float32`
    and can be configured by calling
    `keras.mixed_precision.set_dtype_policy('mixed_float16')`.

    Args:
        value: String; `'bfloat16'`, `'float16'`, `'float32'`, or
            `'float64'`.

    Examples:

    >>> keras.config.set_floatx('float64')
    >>> keras.config.floatx()
    'float64'
    >>> keras.config.set_floatx('float32')  # restore the default

    Raises:
        ValueError: In case of invalid value.
    """
    global _FLOATX
    accepted_dtypes = {"bfloat16", "float16", "float32", "float64"}
    if value not in accepted_dtypes:
        raise ValueError(
            f"Unknown `floatx` value: {value}. "
            f"Expected one of {accepted_dtypes}"
        )
    _FLOATX = str(value)
@keras_export(["keras.config.epsilon", "keras.backend.epsilon"])
def epsilon():
    """Return the fuzz factor used in numeric expressions.

    Returns:
        A float.

    Example:

    >>> keras.config.epsilon()
    1e-07
    """
    return _EPSILON
@keras_export(["keras.config.set_epsilon", "keras.backend.set_epsilon"])
def set_epsilon(value):
    """Set the fuzz factor used in numeric expressions.

    Args:
        value: float. New value of epsilon.

    Examples:

    >>> keras.config.set_epsilon(1e-5)
    >>> keras.config.epsilon()
    1e-05
    >>> keras.config.set_epsilon(1e-7)  # restore the default
    """
    global _EPSILON
    _EPSILON = value
@keras_export(
    [
        "keras.config.image_data_format",
        "keras.backend.image_data_format",
    ]
)
def image_data_format():
    """Return the default image data format convention.

    Returns:
        String: either `'channels_first'` or `'channels_last'`.

    Example:

    >>> keras.config.image_data_format()
    'channels_last'
    """
    return _IMAGE_DATA_FORMAT
@keras_export(
    [
        "keras.config.set_image_data_format",
        "keras.backend.set_image_data_format",
    ]
)
def set_image_data_format(data_format):
    """Set the default image data format convention.

    Args:
        data_format: string. `'channels_first'` or `'channels_last'`
            (case-insensitive).

    Examples:

    >>> keras.config.set_image_data_format('channels_first')
    >>> keras.config.image_data_format()
    'channels_first'
    >>> keras.config.set_image_data_format('channels_last')  # default
    """
    global _IMAGE_DATA_FORMAT
    data_format = str(data_format).lower()
    if data_format not in ("channels_first", "channels_last"):
        raise ValueError(
            "The `data_format` argument must be one of "
            "{'channels_first', 'channels_last'}. "
            f"Received: data_format={data_format}"
        )
    _IMAGE_DATA_FORMAT = data_format
@keras_export("keras.config.enable_flash_attention")
def enable_flash_attention():
    """Enable flash attention (the default).

    Flash attention is a performance optimization for attention layers,
    particularly valuable for large language models. Once enabled,
    supported layers such as `MultiHeadAttention` will *attempt* to use it.

    Enabling it is not a guarantee that it will be used: typically the
    inputs must be in `float16` or `bfloat16`, and layout requirements
    vary by backend.
    """
    from keras.src.backend.common import global_state

    # `None` means "let the backend decide", which is the enabled state.
    global_state.set_global_attribute("flash_attention", None)
@keras_export("keras.config.disable_flash_attention")
def disable_flash_attention():
    """Disable flash attention.

    After calling this, supported layers such as `MultiHeadAttention`
    will no longer attempt to use flash attention for faster
    computations.
    """
    from keras.src.backend.common import global_state

    global_state.set_global_attribute("flash_attention", False)
@keras_export("keras.config.is_flash_attention_enabled")
def is_flash_attention_enabled():
    """Check whether flash attention is globally enabled in Keras.

    Flash attention is a performance-optimized attention implementation
    used by compatible layers (e.g. `MultiHeadAttention`). Even when
    enabled, it is only used when the backend supports it for the given
    inputs (typically `float16`/`bfloat16` dtypes).

    Returns:
        `False` if disabled; any other value indicates it is enabled.
    """
    from keras.src.backend.common import global_state

    return global_state.get_global_attribute("flash_attention", default=None)
@keras_export("keras.config.is_nnx_enabled")
def is_nnx_enabled():
    """Report whether NNX-specific features are enabled (JAX backend).

    Returns:
        bool: `True` when NNX features are on. Defaults to `False`.
    """
    return _NNX_ENABLED
def set_nnx_enabled(value):
    """Enable or disable NNX-specific features for the JAX backend.

    Args:
        value: Truthy to enable NNX features (requires `flax` to be
            installed), falsy to disable them.

    Raises:
        ImportError: If enabling NNX but `flax` is not installed.
    """
    global _NNX_ENABLED
    from keras.src.backend.common import global_state

    enable = bool(value)
    if enable:
        # Verify the optional dependency *before* mutating any state, so a
        # failed enable leaves both `_NNX_ENABLED` and the global-state
        # attribute unchanged (the previous code set `_NNX_ENABLED = True`
        # first, leaving inconsistent state on ImportError).
        try:
            from flax import nnx  # noqa: F401
        except ImportError:
            raise ImportError(
                "To use NNX with the JAX backend, you must install `flax`."
            )
    _NNX_ENABLED = enable
    global_state.set_global_attribute("nnx_enabled", enable)
def standardize_data_format(data_format):
    """Validate and normalize a `data_format` value.

    Args:
        data_format: `'channels_first'` or `'channels_last'` (any casing),
            or `None` to fall back to the global image data format.

    Returns:
        The lowercased, validated data format string.

    Raises:
        ValueError: If the value is not one of the two accepted formats.
    """
    if data_format is None:
        return image_data_format()
    data_format = str(data_format).lower()
    if data_format in ("channels_first", "channels_last"):
        return data_format
    raise ValueError(
        "The `data_format` argument must be one of "
        "{'channels_first', 'channels_last'}. "
        f"Received: data_format={data_format}"
    )
# Module-level bootstrap: determine the Keras home directory, load (or
# create) `keras.json`, and apply environment-variable overrides. Order
# matters here: the config file is applied first, then env vars override it.

# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if "KERAS_HOME" in os.environ:
    _KERAS_DIR = os.environ.get("KERAS_HOME")
else:
    _keras_base_dir = os.path.expanduser("~")
    if not os.access(_keras_base_dir, os.W_OK):
        # Home is not writable (e.g. restricted environments): fall back.
        _keras_base_dir = "/tmp"
    _KERAS_DIR = os.path.join(_keras_base_dir, ".keras")


def keras_home():
    # Private accessor for the keras home location.
    return _KERAS_DIR


# Attempt to read Keras config file.
_config_path = os.path.expanduser(os.path.join(_KERAS_DIR, "keras.json"))
if os.path.exists(_config_path):
    try:
        with open(_config_path) as f:
            _config = json.load(f)
    except ValueError:
        # Malformed JSON: ignore the file and use defaults.
        _config = {}
    _floatx = _config.get("floatx", floatx())
    # NOTE(review): this assert rejects "bfloat16" even though
    # `set_floatx` accepts it — confirm whether bfloat16 should be
    # allowed in the config file.
    assert _floatx in {"float16", "float32", "float64"}
    _epsilon = _config.get("epsilon", epsilon())
    assert isinstance(_epsilon, float)
    _backend = _config.get("backend", _BACKEND)
    _image_data_format = _config.get("image_data_format", image_data_format())
    assert _image_data_format in {"channels_last", "channels_first"}

    _nnx_enabled_config = _config.get("nnx_enabled", _NNX_ENABLED)
    # Apply basic configs that don't cause circular import
    set_floatx(_floatx)
    _NNX_ENABLED = _nnx_enabled_config
    set_epsilon(_epsilon)
    set_image_data_format(_image_data_format)
    _BACKEND = _backend

# Save config file, if possible.
if not os.path.exists(_KERAS_DIR):
    try:
        os.makedirs(_KERAS_DIR)
    except OSError:
        # Except permission denied and potential race conditions
        # in multi-threaded environments.
        pass

if not os.path.exists(_config_path):
    # First run: persist the current defaults.
    _config = {
        "floatx": floatx(),
        "epsilon": epsilon(),
        "backend": _BACKEND,
        "image_data_format": image_data_format(),
    }
    try:
        with open(_config_path, "w") as f:
            f.write(json.dumps(_config, indent=4))
    except IOError:
        # Except permission denied.
        pass

# Set backend based on KERAS_BACKEND flag, if applicable.
# Env var takes precedence over the config file.
if "KERAS_BACKEND" in os.environ:
    _backend = os.environ["KERAS_BACKEND"]
    if _backend:
        _BACKEND = _backend

if "KERAS_MAX_EPOCHS" in os.environ:
    _MAX_EPOCHS = int(os.environ["KERAS_MAX_EPOCHS"])
if "KERAS_MAX_STEPS_PER_EPOCH" in os.environ:
    _MAX_STEPS_PER_EPOCH = int(os.environ["KERAS_MAX_STEPS_PER_EPOCH"])

if _BACKEND != "tensorflow":
    # If we are not running on the tensorflow backend, we should stop
    # tensorflow from using all available GPU memory. See
    # https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
@keras_export(
    [
        "keras.config.backend",
        "keras.backend.backend",
    ]
)
def backend():
    """Return the name of the backend Keras is currently using.

    Returns:
        String: one of `"tensorflow"`, `"torch"`, or `"jax"`.

    Example:

    >>> keras.config.backend()
    'tensorflow'
    """
    return _BACKEND
@keras_export(["keras.config.set_max_epochs"])
def set_max_epochs(max_epochs):
    """Cap the number of epochs used by any call to `fit()`.

    Intended purely for debugging: the same cap can also be applied via
    the `KERAS_MAX_EPOCHS` environment variable, to quickly run a script
    without modifying its source.

    Args:
        max_epochs: Integer limit on the number of epochs, or `None` to
            apply no limit.
    """
    global _MAX_EPOCHS
    _MAX_EPOCHS = max_epochs
@keras_export(["keras.config.set_max_steps_per_epoch"])
def set_max_steps_per_epoch(max_steps_per_epoch):
    """Limit the maximum number of steps for any call to fit/evaluate/predict.

    This will cap the number of steps for a single epoch of a call to
    `fit()`, `evaluate()`, or `predict()`. This is purely for debugging,
    and can also be set via the `KERAS_MAX_STEPS_PER_EPOCH` environment
    variable to quickly run a script without modifying its source.

    Args:
        max_steps_per_epoch: The integer limit on the number of steps per
            epoch or `None`. If `None`, no limit is applied.
    """
    # Fixes the original docstring, which documented a nonexistent
    # `max_epochs` argument and contained the typo "a scrip".
    global _MAX_STEPS_PER_EPOCH
    _MAX_STEPS_PER_EPOCH = max_steps_per_epoch
@keras_export(["keras.config.max_epochs"])
def max_epochs():
    """Return the global cap on the number of epochs for `fit()`.

    This is the value set by `keras.config.set_max_epochs` or by the
    `KERAS_MAX_EPOCHS` environment variable.

    Returns:
        The integer epoch limit, or `None` if no limit has been set.
    """
    return _MAX_EPOCHS
@keras_export(["keras.config.max_steps_per_epoch"])
def max_steps_per_epoch():
    """Get the maximum number of steps for any call to fit/evaluate/predict.

    Retrieves the limit set by `keras.config.set_max_steps_per_epoch` or
    by the `KERAS_MAX_STEPS_PER_EPOCH` environment variable.

    Returns:
        The integer limit on the number of steps per epoch, or `None` if
        no limit has been set.
    """
    # Fixes the original docstring, which had an `Args:` section for a
    # nonexistent `max_epochs` parameter instead of a `Returns:` section.
    return _MAX_STEPS_PER_EPOCH
# Allow the environment to force-enable/disable NNX features at import
# time. Any value other than "true" or "1" (case-insensitive) disables.
if "KERAS_NNX_ENABLED" in os.environ:
    env_val = os.environ["KERAS_NNX_ENABLED"].lower()
    if env_val == "true" or env_val == "1":
        _NNX_ENABLED = True
    else:
        _NNX_ENABLED = False
    # Propagate to global state (and validate that `flax` is installed).
    set_nnx_enabled(_NNX_ENABLED)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/__init__.py | keras/src/backend/__init__.py | from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import Variable
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
from keras.src.backend.tensorflow.core import Variable as BackendVariable
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
from keras.src.backend.jax.core import Variable as BackendVariable
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
from keras.src.backend.torch.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
from keras.src.backend.numpy.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "openvino":
from keras.src.backend.openvino import * # noqa: F403
from keras.src.backend.openvino.core import Variable as BackendVariable
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable): # noqa: F811
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/image.py | keras/src/backend/jax/image.py | import functools
import jax
import jax.numpy as jnp
from keras.src import backend
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.random.seed_generator import draw_seed
# Interpolation methods accepted by `resize` (forwarded to
# `jax.image.resize`).
RESIZE_INTERPOLATIONS = (
    "bilinear",
    "nearest",
    "lanczos3",
    "lanczos5",
    "bicubic",
)
AFFINE_TRANSFORM_INTERPOLATIONS = {  # map to spline interpolation order
    "nearest": 0,
    "bilinear": 1,
}
# Fill modes accepted by `affine_transform`.
AFFINE_TRANSFORM_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Fill modes accepted by `map_coordinates`.
MAP_COORDINATES_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Methods accepted by scale-and-translate based resizing.
SCALE_AND_TRANSLATE_METHODS = {
    "linear",
    "bilinear",
    "trilinear",
    "cubic",
    "bicubic",
    "tricubic",
    "lanczos3",
    "lanczos5",
}
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to single-channel grayscale.

    Uses the luma weights (0.2989, 0.5870, 0.1140), matching
    `tf.image.rgb_to_grayscale`. The result keeps the input's dtype and
    rank; the channel axis is reduced to size 1.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        data_format: `"channels_last"`, `"channels_first"`, or `None` to
            use the global default.

    Returns:
        Grayscale tensor with a single channel, same dtype as `images`.

    Raises:
        ValueError: If `images` is not rank 3 or rank 4.
    """
    images = convert_to_tensor(images)
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        channels_axis = -1
    else:
        channels_axis = -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # Do the weighted sum in a float dtype, then cast back at the end.
    original_dtype = images.dtype
    images = images.astype(backend.result_type(images.dtype, float))
    # Ref: tf.image.rgb_to_grayscale
    luma_weights = convert_to_tensor(
        [0.2989, 0.5870, 0.1140], dtype=images.dtype
    )
    grayscale = jnp.tensordot(images, luma_weights, axes=(channels_axis, -1))
    grayscale = jnp.expand_dims(grayscale, axis=channels_axis)
    return grayscale.astype(original_dtype)
def rgb_to_hsv(images, data_format=None):
    """Convert RGB images (float dtype) to HSV.

    Hue, saturation, and value are each in [0, 1]. Raises ValueError for
    non-float inputs or ranks other than 3/4.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    # Flush denormal-scale values to exactly zero to stabilize the
    # comparisons below.
    eps = jnp.finfo(dtype).eps
    images = jnp.where(jnp.abs(images) < eps, 0.0, images)
    red, green, blue = jnp.split(images, 3, channels_axis)
    red = jnp.squeeze(red, channels_axis)
    green = jnp.squeeze(green, channels_axis)
    blue = jnp.squeeze(blue, channels_axis)

    def rgb_planes_to_hsv_planes(r, g, b):
        # Standard RGB->HSV: value = max channel, saturation = range /
        # value, hue = position of the max channel on the color wheel.
        # `safe_*` guards avoid division by zero; the corresponding
        # outputs are masked to 0 afterwards.
        value = jnp.maximum(jnp.maximum(r, g), b)
        minimum = jnp.minimum(jnp.minimum(r, g), b)
        range_ = value - minimum

        safe_value = jnp.where(value > 0, value, 1.0)
        safe_range = jnp.where(range_ > 0, range_, 1.0)

        saturation = jnp.where(value > 0, range_ / safe_value, 0.0)
        norm = 1.0 / (6.0 * safe_range)

        hue = jnp.where(
            value == g,
            norm * (b - r) + 2.0 / 6.0,
            norm * (r - g) + 4.0 / 6.0,
        )
        hue = jnp.where(value == r, norm * (g - b), hue)
        # Gray pixels (range_ == 0) get hue 0; negative hues wrap by +1.
        hue = jnp.where(range_ > 0, hue, 0.0) + (hue < 0.0).astype(hue.dtype)
        return hue, saturation, value

    images = jnp.stack(
        rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis
    )
    return images
def hsv_to_rgb(images, data_format=None):
    """Convert HSV images (float dtype) back to RGB.

    Inverse of `rgb_to_hsv`. Raises ValueError for non-float inputs or
    ranks other than 3/4.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    hue, saturation, value = jnp.split(images, 3, channels_axis)
    hue = jnp.squeeze(hue, channels_axis)
    saturation = jnp.squeeze(saturation, channels_axis)
    value = jnp.squeeze(value, channels_axis)

    def hsv_planes_to_rgb_planes(hue, saturation, value):
        # Piecewise-linear reconstruction: dh is the hue scaled to the
        # six sectors of the color wheel; dr/dg/db are the per-channel
        # triangular response functions clipped to [0, 1].
        dh = jnp.mod(hue, 1.0) * 6.0
        dr = jnp.clip(jnp.abs(dh - 3.0) - 1.0, 0.0, 1.0)
        dg = jnp.clip(2.0 - jnp.abs(dh - 2.0), 0.0, 1.0)
        db = jnp.clip(2.0 - jnp.abs(dh - 4.0), 0.0, 1.0)
        one_minus_s = 1.0 - saturation

        red = value * (one_minus_s + saturation * dr)
        green = value * (one_minus_s + saturation * dg)
        blue = value * (one_minus_s + saturation * db)
        return red, green, blue

    images = jnp.stack(
        hsv_planes_to_rgb_planes(hue, saturation, value), axis=channels_axis
    )
    return images
def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize images to `size` using `jax.image.resize`.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        size: Tuple `(target_height, target_width)`.
        interpolation: One of `RESIZE_INTERPOLATIONS`.
        antialias: Whether to use an antialiasing filter when
            downsampling.
        crop_to_aspect_ratio: If `True`, center-crop the input to the
            target aspect ratio before resizing. Mutually exclusive with
            `pad_to_aspect_ratio`.
        pad_to_aspect_ratio: If `True`, center-pad the input with
            `fill_value` to the target aspect ratio before resizing.
        fill_mode: Only `"constant"` is supported.
        fill_value: Scalar used for padding when
            `pad_to_aspect_ratio=True`.
        data_format: `"channels_last"`, `"channels_first"`, or `None` to
            use the global default.

    Returns:
        The resized image(s), with the same rank as `images`.

    Raises:
        ValueError: For an invalid `interpolation`, `fill_mode`, or
            `size`, an unsupported rank, or when both aspect-ratio
            options are set.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in RESIZE_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
        )
    if fill_mode != "constant":
        raise ValueError(
            "Invalid value for argument `fill_mode`. Only `'constant'` "
            f"is supported. Received: fill_mode={fill_mode}"
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if not len(size) == 2:
        raise ValueError(
            "Argument `size` must be a tuple of two elements "
            f"(height, width). Received: size={size}"
        )
    size = tuple(size)
    target_height, target_width = size
    ndim = len(images.shape)
    # Expand `size` to the full output shape expected by jax.image.resize.
    if ndim == 4:
        if data_format == "channels_last":
            size = (images.shape[0],) + size + (images.shape[-1],)
        else:
            size = (images.shape[0], images.shape[1]) + size
    elif ndim == 3:
        if data_format == "channels_last":
            size = size + (images.shape[-1],)
        else:
            size = (images.shape[0],) + size
    else:
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # Height/width axes (non-negative indices) for the given layout/rank.
    if data_format == "channels_last":
        h_axis, w_axis = ndim - 3, ndim - 2
    else:
        h_axis, w_axis = ndim - 2, ndim - 1
    height, width = images.shape[h_axis], images.shape[w_axis]
    if crop_to_aspect_ratio:
        # Center-crop to the largest region with the target aspect ratio.
        crop_height = int(float(width * target_height) / target_width)
        crop_height = max(min(height, crop_height), 1)
        crop_width = int(float(height * target_width) / target_height)
        crop_width = max(min(width, crop_width), 1)
        crop_box_hstart = int(float(height - crop_height) / 2)
        crop_box_wstart = int(float(width - crop_width) / 2)
        slices = [slice(None)] * ndim
        slices[h_axis] = slice(crop_box_hstart, crop_box_hstart + crop_height)
        slices[w_axis] = slice(crop_box_wstart, crop_box_wstart + crop_width)
        images = images[tuple(slices)]
    elif pad_to_aspect_ratio:
        # Center-pad with `fill_value` up to the target aspect ratio. The
        # two pad amounts cannot both be positive (the aspect ratio can
        # only be too tall or too wide, not both), so this single
        # `jnp.pad` matches the original one-dimension-at-a-time logic.
        pad_height = max(
            height, int(float(width * target_height) / target_width)
        )
        pad_width = max(
            width, int(float(height * target_width) / target_height)
        )
        img_box_hstart = int(float(pad_height - height) / 2)
        img_box_wstart = int(float(pad_width - width) / 2)
        pad_amounts = [(0, 0)] * ndim
        pad_amounts[h_axis] = (img_box_hstart, img_box_hstart)
        pad_amounts[w_axis] = (img_box_wstart, img_box_wstart)
        # `jnp.pad` preserves the input dtype; the previous
        # concatenate-based implementation built the padding with
        # `jnp.ones(...)` and, in the `channels_first` branches, omitted
        # `dtype=images.dtype`, silently promoting the result's dtype.
        images = jnp.pad(
            images,
            pad_amounts,
            mode="constant",
            constant_values=fill_value,
        )
    return jax.image.resize(
        images, size, method=interpolation, antialias=antialias
    )
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Apply affine transform(s) to image(s) via coordinate resampling.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        transform: Rank-1 (single) or rank-2 (batch) tensor of 8 affine
            coefficients per image (projective row is padded internally).
        interpolation: one of the keys of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_mode: one of `AFFINE_TRANSFORM_FILL_MODES`, passed to
            `jax.scipy.ndimage.map_coordinates` as `mode`.
        fill_value: constant fill used when `fill_mode="constant"`.
        data_format: `"channels_last"`, `"channels_first"`, or `None` for
            the Keras global default.

    Returns:
        The transformed images with the same rank and layout as the input.

    Raises:
        ValueError: on unknown `interpolation`/`fill_mode` or invalid ranks.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    transform = convert_to_tensor(transform)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if len(transform.shape) not in (1, 2):
        raise ValueError(
            "Invalid transform rank: expected rank 1 (single transform) "
            "or rank 2 (batch of transforms). Received input with shape: "
            f"transform.shape={transform.shape}"
        )
    # Promote unbatched inputs to batched form; remember to squeeze later.
    need_squeeze = False
    if len(images.shape) == 3:
        images = jnp.expand_dims(images, axis=0)
        need_squeeze = True
    if len(transform.shape) == 1:
        transform = jnp.expand_dims(transform, axis=0)

    # Internally work in channels-last; transpose back at the end.
    if data_format == "channels_first":
        images = jnp.transpose(images, (0, 2, 3, 1))

    batch_size = images.shape[0]

    # Build a (batch, H, W, C, 3)-style index grid over the output pixels
    # (row, col, channel order from `indexing="ij"`).
    meshgrid = jnp.meshgrid(
        *[jnp.arange(size) for size in images.shape[1:]], indexing="ij"
    )
    indices = jnp.concatenate(
        [jnp.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
    )
    indices = jnp.tile(indices, (batch_size, 1, 1, 1, 1))

    # Swap the (a0, a2) and (b1, b2) coefficient pairs — apparently to
    # convert the transform from (x, y) convention into the (row, col) =
    # (y, x) order used by the `ij` meshgrid above. TODO confirm against
    # the reference TF implementation.
    a0 = transform[:, 0]
    a2 = transform[:, 2]
    b1 = transform[:, 4]
    b2 = transform[:, 5]
    transform = transform.at[:, 0].set(b1)
    transform = transform.at[:, 2].set(b2)
    transform = transform.at[:, 4].set(a0)
    transform = transform.at[:, 5].set(a2)

    # Pad the 8 coefficients to a full 3x3 projective matrix, then peel the
    # translation column off so it can be added separately after the
    # linear part is applied via einsum.
    transform = jnp.pad(
        transform, pad_width=[[0, 0], [0, 1]], constant_values=1
    )
    transform = jnp.reshape(transform, (batch_size, 3, 3))
    offset = transform[:, 0:2, 2]
    offset = jnp.pad(offset, pad_width=[[0, 0], [0, 1]])
    transform = transform.at[:, 0:2, 2].set(0)

    # Map output pixel indices to input sampling coordinates.
    coordinates = jnp.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
    coordinates = jnp.moveaxis(coordinates, source=-1, destination=1)
    coordinates += jnp.reshape(offset, shape=(*offset.shape, 1, 1, 1))

    # Resample each image at its transformed coordinates.
    _map_coordinates = functools.partial(
        jax.scipy.ndimage.map_coordinates,
        order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
        mode=fill_mode,
        cval=fill_value,
    )
    affined = jax.vmap(_map_coordinates)(images, coordinates)

    if data_format == "channels_first":
        affined = jnp.transpose(affined, (0, 3, 1, 2))
    if need_squeeze:
        affined = jnp.squeeze(affined, axis=0)
    return affined
def perspective_transform(
    images,
    start_points,
    end_points,
    interpolation="bilinear",
    fill_value=0,
    data_format=None,
):
    """Apply a perspective (projective) warp defined by 4 point pairs.

    A homography is solved per image from the corner correspondences and
    used to compute, for every output pixel, the input location to sample.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        start_points: `(4, 2)` or `(N, 4, 2)` source corner coordinates.
        end_points: `(4, 2)` or `(N, 4, 2)` destination corner coordinates;
            must have the same shape as `start_points`.
        interpolation: one of the keys of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_value: constant used for samples falling outside the input.
        data_format: `"channels_last"`, `"channels_first"`, or `None` for
            the Keras global default.

    Returns:
        The warped images with the same rank and layout as the input.

    Raises:
        ValueError: on invalid ranks/shapes or unknown `interpolation`.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if start_points.shape[-2:] != (4, 2) or start_points.ndim not in (2, 3):
        raise ValueError(
            "Invalid start_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {start_points.shape}"
        )
    if end_points.shape[-2:] != (4, 2) or end_points.ndim not in (2, 3):
        raise ValueError(
            "Invalid end_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {end_points.shape}"
        )
    if start_points.shape != end_points.shape:
        raise ValueError(
            "start_points and end_points must have the same shape."
            f" Received start_points.shape={start_points.shape}, "
            f"end_points.shape={end_points.shape}"
        )

    # Promote unbatched inputs to batched form; remember to squeeze later.
    need_squeeze = False
    if len(images.shape) == 3:
        images = jnp.expand_dims(images, axis=0)
        need_squeeze = True

    if len(start_points.shape) == 2:
        start_points = jnp.expand_dims(start_points, axis=0)
    if len(end_points.shape) == 2:
        end_points = jnp.expand_dims(end_points, axis=0)

    # Internally work in channels-last; transpose back at the end.
    if data_format == "channels_first":
        images = jnp.transpose(images, (0, 2, 3, 1))

    _, height, width, _ = images.shape

    # One set of 8 homography coefficients per image in the batch.
    transforms = compute_homography_matrix(
        jnp.asarray(start_points, dtype="float32"),
        jnp.asarray(end_points, dtype="float32"),
    )

    # Homogeneous coordinates of every output pixel, flattened to (3, H*W).
    x, y = jnp.meshgrid(jnp.arange(width), jnp.arange(height), indexing="xy")
    grid = jnp.stack([x.ravel(), y.ravel(), jnp.ones_like(x).ravel()], axis=0)

    def transform_coordinates(transform):
        # Projective mapping of output grid positions to input (y, x)
        # sample locations (note the perspective division by `denom`).
        denom = transform[6] * grid[0] + transform[7] * grid[1] + 1.0
        x_in = (
            transform[0] * grid[0] + transform[1] * grid[1] + transform[2]
        ) / denom
        y_in = (
            transform[3] * grid[0] + transform[4] * grid[1] + transform[5]
        ) / denom
        return jnp.stack([y_in, x_in], axis=0)

    transformed_coords = jax.vmap(transform_coordinates)(transforms)

    def interpolate_image(image, coords):
        # Sample each channel independently at the warped coordinates.
        def interpolate_channel(channel_img):
            return jax.scipy.ndimage.map_coordinates(
                channel_img,
                coords,
                order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                mode="constant",
                cval=fill_value,
            ).reshape(height, width)

        return jax.vmap(interpolate_channel, in_axes=0)(
            jnp.moveaxis(image, -1, 0)
        )

    output = jax.vmap(interpolate_image, in_axes=(0, 0))(
        images, transformed_coords
    )
    # vmapped channel sampling produced (batch, C, H, W); restore
    # channels-last before the data_format handling below.
    output = jnp.moveaxis(output, 1, -1)

    if data_format == "channels_first":
        output = jnp.transpose(output, (0, 3, 1, 2))
    if need_squeeze:
        output = jnp.squeeze(output, axis=0)

    return output
def compute_homography_matrix(start_points, end_points):
    """Solve for the 8 homography coefficients per batch element.

    Sets up the standard direct-linear system: for each of the 4 point
    correspondences, one row constrains x and one row constrains y, giving
    an `(N, 8, 8)` system solved against the start coordinates.

    Args:
        start_points: `(N, 4, 2)` float tensor of source corners.
        end_points: `(N, 4, 2)` float tensor of destination corners.

    Returns:
        `(N, 8)` tensor of homography coefficients.
    """
    start_x = start_points[..., 0]
    start_y = start_points[..., 1]
    end_x = end_points[..., 0]
    end_y = end_points[..., 1]

    zero = jnp.zeros_like(end_x)
    one = jnp.ones_like(end_x)

    # Rows constraining the x-coordinate of each correspondence.
    rows_for_x = jnp.stack(
        [
            end_x,
            end_y,
            one,
            zero,
            zero,
            zero,
            -start_x * end_x,
            -start_x * end_y,
        ],
        axis=-1,
    )
    # Rows constraining the y-coordinate of each correspondence.
    rows_for_y = jnp.stack(
        [
            zero,
            zero,
            zero,
            end_x,
            end_y,
            one,
            -start_y * end_x,
            -start_y * end_y,
        ],
        axis=-1,
    )

    system = jnp.concatenate([rows_for_x, rows_for_y], axis=1)
    rhs = jnp.concatenate([start_x, start_y], axis=-1)[..., None]
    solution = jnp.linalg.solve(system, rhs)
    return jnp.squeeze(solution, axis=-1)
def map_coordinates(
    inputs, coordinates, order, fill_mode="constant", fill_value=0.0
):
    """Sample `inputs` at (possibly fractional) `coordinates`.

    Thin validated wrapper over `jax.scipy.ndimage.map_coordinates`.
    `coordinates` must have a leading dimension equal to the rank of
    `inputs` — one coordinate array per input axis.
    """
    inputs = convert_to_tensor(inputs)
    coordinates = convert_to_tensor(coordinates)
    input_rank = len(inputs.shape)
    if coordinates.shape[0] != input_rank:
        raise ValueError(
            "First dim of `coordinates` must be the same as the rank of "
            "`inputs`. "
            f"Received inputs with shape: {inputs.shape} and coordinate "
            f"leading dim of {coordinates.shape[0]}"
        )
    if len(coordinates.shape) < 2:
        raise ValueError(
            "Invalid coordinates rank: expected at least rank 2."
            f" Received input with shape: {coordinates.shape}"
        )
    if fill_mode not in MAP_COORDINATES_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected one of "
            f"{set(MAP_COORDINATES_FILL_MODES)}. Received: "
            f"fill_mode={fill_mode}"
        )
    # Only nearest (0) and linear (1) interpolation are supported by JAX.
    if order not in (0, 1):
        raise ValueError(
            "Invalid value for argument `order`. Expected one of "
            f"{[0, 1]}. Received: order={order}"
        )
    return jax.scipy.ndimage.map_coordinates(
        inputs, coordinates, order, mode=fill_mode, cval=fill_value
    )
def gaussian_blur(
    images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None
):
    """Apply a 2D Gaussian blur via a depthwise convolution.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        kernel_size: Pair of ints giving the kernel window extents. Note
            `kernel_size[0]`/`sigma[0]` parameterize the second operand of
            the outer product below — confirm axis orientation with callers
            before using anisotropic kernels.
        sigma: Pair of Gaussian standard deviations, one per kernel axis.
        data_format: `"channels_last"`, `"channels_first"` or `None`.
            When `None`, the Keras global image data format is used.

    Returns:
        The blurred images, same shape and layout as the input.

    Raises:
        ValueError: if `images` is not rank 3 or 4.
    """

    def _create_gaussian_kernel(kernel_size, sigma, dtype):
        # Build a normalized 2D Gaussian as the outer product of two
        # normalized 1D Gaussians, shaped (1, 1, kh, kw) for OIHW conv.
        def _get_gaussian_kernel1d(size, sigma):
            x = jnp.arange(size, dtype=dtype) - jnp.array(
                (size - 1) / 2, dtype=dtype
            )
            kernel1d = jnp.exp(-0.5 * (x / sigma) ** 2)
            return kernel1d / jnp.sum(kernel1d)

        def _get_gaussian_kernel2d(size, sigma):
            kernel1d_x = _get_gaussian_kernel1d(size[0], sigma[0])
            kernel1d_y = _get_gaussian_kernel1d(size[1], sigma[1])
            return jnp.outer(kernel1d_y, kernel1d_x)

        kernel = _get_gaussian_kernel2d(kernel_size, sigma)[
            jnp.newaxis, jnp.newaxis, :, :
        ]
        return kernel

    # Bug fix: previously `data_format=None` silently fell through to the
    # channels-first path regardless of the Keras global image data format.
    # Standardize it like every other image op in this module does.
    data_format = backend.standardize_data_format(data_format)

    images = convert_to_tensor(images)
    dtype = backend.standardize_dtype(images.dtype)
    sigma = convert_to_tensor(sigma, dtype=dtype)

    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )

    need_squeeze = False
    if images.ndim == 3:
        images = images[jnp.newaxis, ...]
        need_squeeze = True

    # Convolution below runs in NCHW; transpose channels-last input.
    if data_format == "channels_last":
        images = jnp.transpose(images, (0, 3, 1, 2))

    num_channels = images.shape[1]
    kernel = _create_gaussian_kernel(kernel_size, sigma, dtype)
    # Replicate the kernel per channel and blur depthwise via
    # feature_group_count so channels are filtered independently.
    kernel = jnp.tile(kernel, (num_channels, 1, 1, 1))

    blurred_images = jax.lax.conv_general_dilated(
        images,
        kernel,
        window_strides=(1, 1),
        padding="SAME",
        dimension_numbers=("NCHW", "OIHW", "NCHW"),
        feature_group_count=num_channels,
    )

    if data_format == "channels_last":
        blurred_images = jnp.transpose(blurred_images, (0, 2, 3, 1))
    if need_squeeze:
        blurred_images = blurred_images.squeeze(axis=0)

    return blurred_images
def elastic_transform(
    images,
    alpha=20.0,
    sigma=5.0,
    interpolation="bilinear",
    fill_mode="reflect",
    fill_value=0.0,
    seed=None,
    data_format=None,
):
    """Warp images with a random, smooth (elastic) displacement field.

    Per-pixel normal noise is drawn, smoothed with a Gaussian blur of
    width `sigma`, scaled by `alpha`, and used to resample the images.

    Args:
        images: Rank-3 (single image) or rank-4 (batch of images) tensor.
        alpha: magnitude of the displacements.
        sigma: smoothing standard deviation; also sets the blur kernel size.
        interpolation: one of the keys of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_mode: one of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: constant fill used when `fill_mode="constant"`.
        seed: optional RNG seed.
        data_format: `"channels_last"`, `"channels_first"`, or `None` for
            the Keras global default.

    Returns:
        The distorted images, same shape/dtype/layout as the input.

    Raises:
        ValueError: on unknown `interpolation`/`fill_mode` or invalid rank.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )

    images = convert_to_tensor(images)
    alpha = convert_to_tensor(alpha)
    sigma = convert_to_tensor(sigma)
    input_dtype = images.dtype

    # Odd-sized kernel covering roughly +/-3 sigma (`| 1` forces oddness).
    kernel_size = (int(6 * sigma) | 1, int(6 * sigma) | 1)

    need_squeeze = False
    if len(images.shape) == 3:
        images = jnp.expand_dims(images, axis=0)
        need_squeeze = True

    if data_format == "channels_last":
        batch_size, height, width, channels = images.shape
        channel_axis = -1
    else:
        batch_size, channels, height, width = images.shape
        channel_axis = 1

    seed = draw_seed(seed)
    # Bug fix: `dx` and `dy` were previously drawn from the *same* PRNG
    # key, making the two displacement fields identical (dx == dy), so
    # every pixel was displaced along the y == x diagonal only. Split the
    # key so horizontal and vertical displacements are independent.
    seed_dx, seed_dy = jax.random.split(seed, 2)
    dx = (
        jax.random.normal(
            seed_dx, shape=(batch_size, height, width), dtype=input_dtype
        )
        * sigma
    )
    dy = (
        jax.random.normal(
            seed_dy, shape=(batch_size, height, width), dtype=input_dtype
        )
        * sigma
    )

    # Smooth the raw noise into coherent displacement fields.
    dx = gaussian_blur(
        jnp.expand_dims(dx, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    dy = gaussian_blur(
        jnp.expand_dims(dy, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    dx = jnp.squeeze(dx)
    dy = jnp.squeeze(dy)

    x, y = jnp.meshgrid(jnp.arange(width), jnp.arange(height))
    x, y = x[None, :, :], y[None, :, :]

    distorted_x = x + alpha * dx
    distorted_y = y + alpha * dy

    # Resample every channel of every image at the displaced coordinates.
    transformed_images = jnp.zeros_like(images)
    if data_format == "channels_last":
        for i in range(channels):
            transformed_images = transformed_images.at[..., i].set(
                jnp.stack(
                    [
                        map_coordinates(
                            images[b, ..., i],
                            [distorted_y[b], distorted_x[b]],
                            order=AFFINE_TRANSFORM_INTERPOLATIONS[
                                interpolation
                            ],
                            fill_mode=fill_mode,
                            fill_value=fill_value,
                        )
                        for b in range(batch_size)
                    ]
                )
            )
    else:
        for i in range(channels):
            transformed_images = transformed_images.at[:, i, :, :].set(
                jnp.stack(
                    [
                        map_coordinates(
                            images[b, i, ...],
                            [distorted_y[b], distorted_x[b]],
                            order=AFFINE_TRANSFORM_INTERPOLATIONS[
                                interpolation
                            ],
                            fill_mode=fill_mode,
                            fill_value=fill_value,
                        )
                        for b in range(batch_size)
                    ]
                )
            )

    if need_squeeze:
        transformed_images = jnp.squeeze(transformed_images, axis=0)
    transformed_images = transformed_images.astype(input_dtype)

    return transformed_images
def scale_and_translate(
    images,
    output_shape,
    scale,
    translation,
    spatial_dims,
    method,
    antialias=True,
):
    """Resample `images` onto `output_shape` with a scale plus translation.

    Thin validated wrapper around `jax.image.scale_and_translate`;
    `spatial_dims` names the axes that `scale`/`translation` apply to.
    """
    if method not in SCALE_AND_TRANSLATE_METHODS:
        raise ValueError(
            "Invalid value for argument `method`. Expected of one "
            f"{SCALE_AND_TRANSLATE_METHODS}. Received: method={method}"
        )
    return jax.image.scale_and_translate(
        convert_to_tensor(images),
        output_shape,
        spatial_dims,
        convert_to_tensor(scale),
        convert_to_tensor(translation),
        method,
        antialias,
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/layer.py | keras/src/backend/jax/layer.py | from keras.src.backend.config import is_nnx_enabled
# Select the base class for JAX-backend Keras layers at import time: when
# Flax NNX integration is enabled, layers must derive from `nnx.Module`.
if is_nnx_enabled():
    from flax import nnx

    class BaseLayer(nnx.Module):
        # NOTE(review): `pytree=False` appears to opt subclasses out of
        # NNX's pytree registration so Keras can manage tracking itself —
        # confirm against the flax.nnx documentation.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(pytree=False, **kwargs)

else:
    # Without NNX, no special base-class behavior is required.
    BaseLayer = object


class JaxLayer(BaseLayer):
    """Backend layer mixin for JAX; all behavior comes from `BaseLayer`."""

    pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/export.py | keras/src/backend/jax/export.py | import copy
import inspect
import itertools
import string
import warnings
from keras.src import tree
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.utils.module_utils import tensorflow as tf
class JaxExportArchive:
    """Adapts JAX-backed Keras models for export as a TF SavedModel.

    Keeps the backend (JAX) variables and their mirrored `tf.Variable`s in
    sync, and converts endpoint functions with `jax2tf` so they can be
    attached to a TF trackable and saved.

    NOTE(review): methods reference `self._tf_trackable` and
    `self._convert_to_tf_variable`, which are not defined in this class —
    presumably supplied by the concrete export archive that uses this as a
    mixin; confirm against the caller.
    """

    def __init__(self):
        # JAX-side variables, tracked in parallel with the mirrored
        # `tf.Variable`s stored on `self._tf_trackable`.
        self._backend_variables = []
        self._backend_trainable_variables = []
        self._backend_non_trainable_variables = []

    def _track_layer(self, layer):
        """Mirror `layer`'s variables as `tf.Variable`s and record the JAX
        originals, keeping trainable-then-non-trainable ordering."""
        # Variables in the lists below are actually part of the trackables
        # that get saved, because the lists are created in __init__.
        trainable_variables = layer.trainable_variables
        non_trainable_variables = layer.non_trainable_variables
        self._tf_trackable.trainable_variables += tree.map_structure(
            self._convert_to_tf_variable, trainable_variables
        )
        self._tf_trackable.non_trainable_variables += tree.map_structure(
            self._convert_to_tf_variable, non_trainable_variables
        )
        self._tf_trackable.variables = (
            self._tf_trackable.trainable_variables
            + self._tf_trackable.non_trainable_variables
        )

        self._backend_trainable_variables += trainable_variables
        self._backend_non_trainable_variables += non_trainable_variables
        # Keep this flat list in the same order as
        # `self._tf_trackable.variables`; `stateless_fn` below relies on
        # the positional correspondence between the two.
        self._backend_variables = (
            self._backend_trainable_variables
            + self._backend_non_trainable_variables
        )

    def add_endpoint(self, name, fn, input_signature=None, **kwargs):
        """Convert `fn` via jax2tf and wrap it as a `tf.function` endpoint.

        Args:
            name: endpoint name. NOTE(review): unused in this body —
                presumably consumed by the caller; confirm.
            fn: JAX-traceable callable to export.
            input_signature: nested structure of input specs; also used to
                truncate `fn`'s parameter list for the exported signature.
            **kwargs: may contain `jax2tf_kwargs` (dict of extra arguments
                for `jax2tf.convert`) and `is_static` (bool; a static
                endpoint has no variable state threaded through it).

        Returns:
            The `tf.function`-decorated endpoint.
        """
        jax2tf_kwargs = kwargs.pop("jax2tf_kwargs", None)
        # Use `copy.copy()` to avoid modification issues.
        jax2tf_kwargs = copy.copy(jax2tf_kwargs) or {}
        is_static = bool(kwargs.pop("is_static", False))

        # Configure `jax2tf_kwargs`
        if "native_serialization" not in jax2tf_kwargs:
            jax2tf_kwargs["native_serialization"] = (
                self._check_device_compatible()
            )
        if "polymorphic_shapes" not in jax2tf_kwargs:
            jax2tf_kwargs["polymorphic_shapes"] = self._to_polymorphic_shape(
                input_signature
            )

        # Note: we truncate the number of parameters to what is specified by
        # `input_signature`.
        fn_signature = inspect.signature(fn)
        fn_parameters = list(fn_signature.parameters.values())

        if is_static:
            from jax.experimental import jax2tf

            jax_fn = jax2tf.convert(fn, **jax2tf_kwargs)
            jax_fn.__signature__ = inspect.Signature(
                parameters=fn_parameters[0 : len(input_signature)],
                return_annotation=fn_signature.return_annotation,
            )

            decorated_fn = tf.function(
                jax_fn,
                input_signature=input_signature,
                autograph=False,
            )
        else:
            # 1. Create a stateless wrapper for `fn`
            # 2. jax2tf the stateless wrapper
            # 3. Create a stateful function that binds the variables with
            #    the jax2tf converted stateless wrapper
            # 4. Make the signature of the stateful function the same as
            #    the original function
            # 5. Wrap in a `tf.function`
            def stateless_fn(variables, *args, **kwargs):
                # Run `fn` with the given variable values substituted in,
                # without touching the real backend variables.
                state_mapping = zip(self._backend_variables, variables)
                with StatelessScope(state_mapping=state_mapping) as scope:
                    output = fn(*args, **kwargs)

                # Gather updated non-trainable variables
                non_trainable_variables = []
                for var in self._backend_non_trainable_variables:
                    new_value = scope.get_current_value(var)
                    non_trainable_variables.append(new_value)
                return output, non_trainable_variables

            jax2tf_stateless_fn = self._convert_jax2tf_function(
                stateless_fn, input_signature, jax2tf_kwargs=jax2tf_kwargs
            )

            def stateful_fn(*args, **kwargs):
                output, non_trainable_variables = jax2tf_stateless_fn(
                    # Change the trackable `ListWrapper` to a plain `list`
                    list(self._tf_trackable.variables),
                    *args,
                    **kwargs,
                )
                # Write updated non-trainable state back into the TF
                # variables so it is captured by the SavedModel.
                for var, new_value in zip(
                    self._tf_trackable.non_trainable_variables,
                    non_trainable_variables,
                ):
                    var.assign(tf.cast(new_value, var.dtype))
                return output

            stateful_fn.__signature__ = inspect.Signature(
                parameters=fn_parameters[0 : len(input_signature)],
                return_annotation=fn_signature.return_annotation,
            )

            decorated_fn = tf.function(
                stateful_fn,
                input_signature=input_signature,
                autograph=False,
            )
        return decorated_fn

    def _convert_jax2tf_function(self, fn, input_signature, jax2tf_kwargs=None):
        """jax2tf-convert `fn`, prepending polymorphic shapes for the
        leading `variables` argument that `stateless_fn` takes."""
        from jax.experimental import jax2tf

        variables_shapes = self._to_polymorphic_shape(
            self._backend_variables, allow_none=False
        )
        input_shapes = list(jax2tf_kwargs["polymorphic_shapes"])
        jax2tf_kwargs["polymorphic_shapes"] = [variables_shapes] + input_shapes
        return jax2tf.convert(fn, **jax2tf_kwargs)

    def _to_polymorphic_shape(self, struct, allow_none=True):
        """Map a structure of shaped objects to jax2tf polymorphic-shape
        strings, e.g. `"(batch, 28, 28, a)"` for unknown dimensions."""
        if allow_none:
            # Generates unique names: a, b, ... z, aa, ab, ... az, ba, ... zz
            # for unknown non-batch dims. Defined here to be scope per endpoint.
            dim_names = itertools.chain(
                string.ascii_lowercase,
                itertools.starmap(
                    lambda a, b: a + b,
                    itertools.product(string.ascii_lowercase, repeat=2),
                ),
            )

        def convert_shape(x):
            poly_shape = []
            for index, dim in enumerate(list(x.shape)):
                if dim is not None:
                    poly_shape.append(str(dim))
                elif not allow_none:
                    raise ValueError(
                        f"Illegal None dimension in {x} with shape {x.shape}"
                    )
                elif index == 0:
                    # A leading unknown dim is conventionally the batch dim.
                    poly_shape.append("batch")
                else:
                    poly_shape.append(next(dim_names))
            return f"({', '.join(poly_shape)})"

        return tree.map_structure(convert_shape, struct)

    def _check_device_compatible(self):
        """Return whether JAX-native serialization is safe: `False` when
        JAX runs on GPU but the installed TF cannot see any GPU."""
        from jax import default_backend as jax_device

        if (
            jax_device() == "gpu"
            and len(tf.config.list_physical_devices("GPU")) == 0
        ):
            warnings.warn(
                "JAX backend is using GPU for export, but installed "
                "TF package cannot access GPU, so reloading the model with "
                "the TF runtime in the same environment will not work. "
                "To use JAX-native serialization for high-performance export "
                "and serving, please install `tensorflow-gpu` and ensure "
                "CUDA version compatibility between your JAX and TF "
                "installations."
            )
            return False
        else:
            return True
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/distribution_lib.py | keras/src/backend/jax/distribution_lib.py | """Utilities for distribution strategy with JAX backend."""
import jax
import numpy as np
from keras.src.backend.common import global_state
from keras.src.random import seed_generator
from keras.src.utils import jax_utils
from keras.src.utils import rng_utils
def list_devices(device_type=None):
    """List every device visible to JAX as `"<platform>:<id>"` strings.

    In a distributed setting this reports the global devices, not just the
    ones local to this process.

    Args:
        device_type: `"cpu"`, `"gpu"` or `"tpu"` (case-insensitive). When
            not provided, JAX selects the accelerator backend (`"gpu"` or
            `"tpu"`) if one is available, otherwise `"cpu"`.

    Return:
        List of device identifier strings available for distributed
        computation.
    """
    backend_name = None
    if device_type:
        backend_name = device_type.lower()
    return [
        "{}:{}".format(device.platform, device.id)
        for device in jax.devices(backend=backend_name)
    ]
def get_device_count(device_type=None):
    """Count the available JAX devices of a given type.

    Args:
        device_type: Optional device type to count (e.g., "cpu", "gpu",
            "tpu"), case-insensitive. If `None`, JAX counts the default
            backend's devices ("gpu" or "tpu" if available, otherwise
            "cpu") — not the sum over all device types.

    Returns:
        int: The total number of JAX devices for the specified type.
    """
    if device_type:
        return jax.device_count(device_type.lower())
    return jax.device_count(None)
def distribute_variable(value, layout):
    """Create a "distributed variable" for JAX.

    JAX has no mutable variable class, so this simply returns `value` as a
    `jax.Array` sharded according to `layout` — identical to calling
    `distribute_tensor`. Intended for eager use, not inside a jitted
    function.

    Args:
        value: the initial value of the variable.
        layout: `TensorLayout` describing the placement, or a JAX-supported
            layout instance (e.g. `jax.sharding.Sharding`).

    Returns:
        A `jax.Array` distributed according to `layout`.
    """
    return distribute_tensor(value, layout)
def distribute_tensor(tensor, layout):
    """Distribute the tensor based on the layout.

    Note that this function can be used both in eager context, or within a
    jitted function.

    Args:
        tensor: `jax.Array` that need to be distributed.
        layout: `TensorLayout` for the created variable, or a
            JAX-supported layout instance (e.g. `jax.sharding.Sharding`).

    Returns:
        Distributed value.
    """
    # Avoid circular imports.
    from keras.src.distribution import TensorLayout

    if isinstance(layout, TensorLayout):
        layout = layout.backend_layout

    # TODO(scottzhu): This might not be a cheap check, we should consider
    # have some proper JAX API for doing this check.
    if jax_utils.is_in_jax_tracing_scope():
        # Inside a trace we cannot `device_put`; emit a sharding constraint
        # and let the compiler place the data instead.
        return jax.lax.with_sharding_constraint(tensor, layout)

    # Skip relayout if unnecessary: the three branches below cover plain
    # shardings, explicit-"layout" objects and explicit-"format" objects,
    # in that precedence order.
    if isinstance(tensor, jax.Array):
        if isinstance(
            layout, jax.sharding.Sharding
        ) and tensor.sharding.is_equivalent_to(layout, ndim=len(tensor.shape)):
            return tensor
        # JAX explicit "layout" support.
        elif hasattr(layout, "layout"):
            current_layout = getattr(tensor, "layout", None)
            if current_layout == layout:
                return tensor
        # JAX explicit "format" support.
        elif hasattr(layout, "format"):
            current_layout = getattr(tensor, "format", None)
            if current_layout == layout:
                return tensor

    return jax.device_put(tensor, layout)
def distribute_data_input(per_process_batch, layout, batch_dim_name):
    """Distribute the input data with the corresponding layout.

    Note that the inputs here is a local worker batch. Within the local
    worker, the data need to be further partitioned to map to each of the
    devices.

    Args:
        per_process_batch: `jax.Array` holding this process's local shard
            of the global batch.
        layout: `TensorLayout` for the distribution information, or a
            `jax.sharding.Sharding` instance.
        batch_dim_name: NOTE(review): unused in this body — presumably the
            mesh axis name of the batch dimension, kept for interface
            parity with other backends; confirm against callers before
            removing.

    Returns:
        A global batch distributed according to `layout`.
    """
    # Avoid circular imports.
    from keras.src.distribution import TensorLayout

    if isinstance(layout, TensorLayout):
        layout = layout.backend_layout

    return jax.make_array_from_process_local_data(layout, per_process_batch)
def initialize_rng():
    """Initializes the global random number generator across processes.

    This is required for consistent initialization in multi-host settings.
    Every process ends up with the same global seed (and, if present, the
    same global `SeedGenerator` seed), so random initializers agree across
    hosts.
    """
    global_seed = rng_utils.get_random_seed()
    # Only set a random seed if not already set
    # via keras.config.set_random_seed()
    if global_seed is None:
        # Generate a random seed on each CPU host and psum them to get a single
        # consistent seed across all processes.
        cpu_devices = jax.devices("cpu")
        num_local_cpu_devices = jax.local_device_count("cpu")
        # Seed must be in range [0, 2^32 - 1], so to ensure proper range and
        # avoid signed integer overflow, we use uint32.
        local_seed = jax.numpy.asarray(
            [seed_generator.make_default_seed()] * num_local_cpu_devices,
            dtype=jax.numpy.uint32,
        )
        # Sum across processes and pull out the first item.
        global_seed = jax.pmap(
            lambda x: jax.lax.psum(x, "all"),
            axis_name="all",
            devices=cpu_devices,
        )(local_seed).item(0)
        # Set the global seed.
        rng_utils.set_random_seed(global_seed)

    # Check if the global seed generator is set and ensure it has an initialized
    # seed. Otherwise, reset the seed to the global seed.
    global_seed_generator = global_state.get_global_attribute(
        seed_generator.GLOBAL_SEED_GENERATOR
    )
    if global_seed_generator is not None:
        seed = global_seed_generator.get_config()["seed"]
        if seed is None:
            # Rebuild the generator with the agreed-upon global seed so all
            # processes draw identical random streams.
            global_state.set_global_attribute(
                seed_generator.GLOBAL_SEED_GENERATOR,
                seed_generator.SeedGenerator(
                    seed=global_seed,
                    name=global_seed_generator.name,
                    backend=global_seed_generator.backend,
                ),
            )
def initialize(job_addresses, num_processes, process_id):
    """Initialize the JAX distributed runtime for this process.

    Args:
        job_addresses: either the coordinator address alone, or a
            comma-separated list of all job addresses whose first entry is
            the coordinator.
        num_processes: total number of participating processes (may be
            `None` to let JAX infer it).
        process_id: index of this process.

    Raises:
        ValueError: if a full address list is given and its length does not
            match `num_processes`.
    """
    coordinator_address = job_addresses
    if job_addresses and "," in job_addresses:
        # A full list of job addresses was provided; the first entry is
        # the coordinator.
        job_addresses = job_addresses.split(",")
        # Sanity-check the list length against num_processes.
        if num_processes is not None and num_processes != len(job_addresses):
            raise ValueError(
                f"The provided job_addresses {job_addresses} has "
                f"{len(job_addresses)} jobs, but num_processes is "
                f"{num_processes}"
            )
        coordinator_address = job_addresses[0]

    jax.distributed.initialize(
        coordinator_address=coordinator_address,
        num_processes=num_processes,
        process_id=process_id,
    )

    # Ensure the random number generator is initialized across processes.
    initialize_rng()
def num_processes():
    """Number of processes participating in the current distribution."""
    return jax.process_count()
def process_id():
    """Index of the current process in the distribution setting."""
    return jax.process_index()
def _to_backend_device(device_name):
if isinstance(device_name, jax.Device):
return device_name
device_name = str(device_name)
if ":" not in device_name:
device_type, device_id = device_name, 0
else:
device_type, device_id = device_name.split(":")
devices = jax.devices(backend=device_type)
for device in devices:
if device.platform == device_type and device.id == int(device_id):
return device
raise ValueError(f"Device not found: {device_name}")
def _to_backend_mesh(device_mesh):
    """Convert a Keras `DeviceMesh` into a `jax.sharding.Mesh`.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `jax.sharding.Mesh` instance with the same shape and axis names.
    """
    mesh_shape = device_mesh.devices.shape
    backend_devices = [
        _to_backend_device(device)
        for device in device_mesh.devices.flatten()
    ]
    device_grid = np.array(backend_devices).reshape(mesh_shape)
    return jax.sharding.Mesh(device_grid, device_mesh.axis_names)
def _to_backend_layout(tensor_layout):
"""Convert the TensorLayout to JAX backend specific Sharding.
Args:
tensor_layout: TensorLayout instance to convert.
Returns:
A `jax.sharding.NamedSharding` instance.
"""
if tensor_layout.device_mesh is None:
raise ValueError(
"Cannot create sharding when device mesh is not set "
"for TensorLayout."
)
partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)
jax_mesh = tensor_layout.device_mesh.backend_mesh
return jax.sharding.NamedSharding(jax_mesh, partition_spec)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/optimizer.py | keras/src/backend/jax/optimizer.py | """A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
    """JAX-specific optimizer base.

    Routes around statelessness requirements in conditional ops used for
    EMA handling and gradient accumulation: instead of Python `if`s on
    traced values, the conditional updates are expressed with
    `jax.lax.cond` / arithmetic masking and then assigned unconditionally.
    """

    def _backend_apply_gradients(self, grads, trainable_variables):
        # One optimizer step. With gradient accumulation enabled, every
        # call updates the accumulators, but variables only change on each
        # `gradient_accumulation_steps`-th call; non-update steps roll the
        # variables back to a snapshot taken before the update.
        if self.gradient_accumulation_steps:
            is_update_step = (
                self._iterations + 1
            ) % self.gradient_accumulation_steps == 0
            steps = self.gradient_accumulation_steps

            # Snapshot current values so non-update steps can restore them
            # after the (unconditionally executed) update below.
            current_trainable_vars_value = [
                v.value for v in trainable_variables
            ]
            current_optimizer_vars_value = [v.value for v in self.variables]

            # `trainable_variables` might have been filtered in previous
            # processing steps, so we need to ensure the correct mapping between
            # `self._accumulated_gradients` and `trainable_variables`
            acc_grads = [
                self._accumulated_gradients[self._get_variable_index(v)]
                for v in trainable_variables
            ]

            # On an update step the accumulators reset to zero; otherwise
            # the fresh grads are added to them.
            new_g_accs = jax.lax.cond(
                is_update_step,
                lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
                lambda: [g + acc_g.value for g, acc_g in zip(grads, acc_grads)],
            )

            # On an update step, apply the averaged accumulated gradient;
            # otherwise keep the raw grads (their effect is rolled back
            # below via the snapshots).
            grads = jax.lax.cond(
                is_update_step,
                lambda: [
                    (g + acc_g.value) / steps
                    for g, acc_g in zip(grads, acc_grads)
                ],
                lambda: list(grads),
            )

            # Apply clipping and weight decay.
            grads = self._clip_gradients(grads)
            self._apply_weight_decay(trainable_variables)

            self._backend_update_step(
                grads, trainable_variables, self.learning_rate
            )

            # Keep the stepped values only on update steps; otherwise
            # restore the pre-step snapshots.
            new_trainable_vars = jax.lax.cond(
                is_update_step,
                lambda: [v.value for v in trainable_variables],
                lambda: current_trainable_vars_value,
            )
            new_opt_vars = jax.lax.cond(
                is_update_step,
                lambda: [v.value for v in self.variables],
                lambda: current_optimizer_vars_value,
            )

            for value, v in zip(new_trainable_vars, trainable_variables):
                v.assign(value)

            for value, v in zip(new_opt_vars, self.variables):
                v.assign(value)

            for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
                g_acc.assign(n_g_acc)

        else:
            # Apply clipping and weight decay.
            grads = self._clip_gradients(grads)
            self._apply_weight_decay(trainable_variables)

            self._backend_update_step(
                grads, trainable_variables, self.learning_rate
            )

        if self.use_ema:
            self._update_model_variables_moving_average(
                self._trainable_variables
            )
            if self.ema_overwrite_frequency is not None:
                # Periodically overwrite the model variables with their
                # moving averages, expressed as 0/1 arithmetic masking so
                # the branch stays traceable under jit.
                should_overwrite_model_vars = (
                    self.iterations + 1
                ) % self.ema_overwrite_frequency == 0
                should_overwrite_model_vars_int = (
                    should_overwrite_model_vars.astype("int32")
                )
                should_not_overwrite_model_vars_int = jnp.logical_not(
                    should_overwrite_model_vars
                ).astype("int32")
                # NOTE(review): this snapshot appears unused below —
                # candidate for removal; confirm no subclass relies on it.
                current_trainable_vars_value = [
                    v.value for v in self._trainable_variables
                ]
                for var, average_var in zip(
                    self._trainable_variables,
                    self._model_variables_moving_average,
                ):
                    var.assign(
                        average_var * should_overwrite_model_vars_int
                        + var.value * should_not_overwrite_model_vars_int
                    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/rnn.py | keras/src/backend/jax/rnn.py | import contextlib
from jax import lax
from jax import numpy as jnp
from keras.src import tree
from keras.src.backend.common import stateless_scope
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
    """Iterate `step_function` over the time dimension of `inputs`.

    Args:
        step_function: Callable `(input_t, states) -> (output_t, new_states)`
            applied at each timestep.
        inputs: Tensor (or nested structure of tensors) with shape
            `(batch, time, ...)`, or `(time, batch, ...)` if `time_major`.
        initial_states: Tensors holding the initial RNN state.
        go_backwards: If True, process the time dimension in reverse.
        mask: Optional mask of shape `(batch, time)` or `(batch, time, 1)`;
            entries that are False are treated as masked-out steps.
        constants: Extra tensors appended to the states passed to each step.
        unroll: If True, run an explicit Python loop over `time_steps`
            (requires a static step count); otherwise use `lax.scan`.
        input_length: Unused on the JAX backend; kept for API compatibility.
        time_major: Whether `inputs` is already time-major.
        zero_output_for_mask: If True, masked steps emit zeros rather than
            repeating the previous output.
        return_all_outputs: If False, only the final step's output/state is
            retained.

    Returns:
        Tuple `(last_output, outputs, new_states)`.
    """
    def swap_batch_timestep(input_t):
        # Swap the batch and timestep dim for the incoming tensor.
        axes = list(range(len(input_t.shape)))
        axes[0], axes[1] = 1, 0
        return jnp.transpose(input_t, axes)

    # All internal processing is done time-major; transpose in (and back
    # out at the end) when the caller's data is batch-major.
    if not time_major:
        inputs = tree.map_structure(swap_batch_timestep, inputs)
    flattened_inputs = tree.flatten(inputs)
    time_steps = flattened_inputs[0].shape[0]
    if mask is not None:
        if mask.dtype != "bool":
            mask = mask.astype("bool")
        if len(mask.shape) == 2:
            mask = jnp.expand_dims(mask, axis=-1)
        if not time_major:
            mask = swap_batch_timestep(mask)
    if constants is None:
        constants = []

    def _expand_mask(mask_t, input_t, fixed_dim=1):
        # Tile `mask_t` so it lines up elementwise with `input_t` beyond
        # the first `fixed_dim` axes.
        if tree.is_nested(mask_t):
            raise ValueError(
                f"mask_t is expected to be tensor, but got {mask_t}"
            )
        if tree.is_nested(input_t):
            raise ValueError(
                f"input_t is expected to be tensor, but got {input_t}"
            )
        rank_diff = len(input_t.shape) - len(mask_t.shape)
        for _ in range(rank_diff):
            mask_t = jnp.expand_dims(mask_t, -1)
        multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
        return jnp.tile(mask_t, multiples)

    if unroll:
        if not time_steps:
            raise ValueError("Unrolling requires a fixed number of timesteps.")
        states = tuple(initial_states)
        successive_states = []
        successive_outputs = []

        # Process the input tensors. The input tensor need to be split on the
        # time_step dim, and reverse if go_backwards is True. In the case of
        # nested input, the input is flattened and then transformed
        # individually. The result of this will be a tuple of lists, each of
        # the item in tuple is list of the tensor with shape (batch, feature)
        def _process_single_input_t(input_t):
            input_t = unstack(input_t)  # unstack for time_step dim
            if go_backwards:
                input_t.reverse()
            return input_t

        if tree.is_nested(inputs):
            processed_input = tree.map_structure(
                _process_single_input_t, inputs
            )
        else:
            processed_input = (_process_single_input_t(inputs),)

        def _get_input_tensor(time):
            # Re-pack one timestep's slices back into the input structure.
            inp = [t_[time] for t_ in processed_input]
            return tree.pack_sequence_as(inputs, inp)

        if mask is not None:
            mask_list = unstack(mask)
            if go_backwards:
                mask_list.reverse()
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                output, new_states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                tiled_mask_t = _expand_mask(mask_t, output)
                # Masked steps carry forward the previous output (zeros at
                # step 0, since there is no previous output yet).
                if not successive_outputs:
                    prev_output = jnp.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]
                output = jnp.where(tiled_mask_t, output, prev_output)
                flat_states = tree.flatten(states)
                flat_new_states = tree.flatten(new_states)
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, s) for s in flat_states
                )
                # Masked steps keep the old state rather than the new one.
                flat_final_states = tuple(
                    jnp.where(m, s, ps)
                    for m, s, ps in zip(
                        tiled_mask_t, flat_new_states, flat_states
                    )
                )
                states = tree.pack_sequence_as(states, flat_final_states)
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = jnp.stack(successive_outputs)
        else:  # mask is None
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                output, states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = jnp.stack(successive_outputs)
    else:  # Unroll == False
        if mask is not None:

            def _step(states, current_input):
                current_input, current_mask = current_input
                # A position is masked only if every mask element for it is
                # False.
                is_masked = jnp.all(
                    jnp.logical_not(current_mask), axis=-1, keepdims=True
                )
                output_t, new_states = step_function(current_input, states)
                if zero_output_for_mask:
                    masked_outs = jnp.where(
                        is_masked, jnp.zeros_like(output_t), output_t
                    )
                else:
                    # Assume the first state is the previous output.
                    output_tm1 = states[0]
                    if tree.is_nested(output_tm1):
                        # Stacked RNN case: assume first state of last cell.
                        output_tm1 = states[-1][0]
                    masked_outs = jnp.where(is_masked, output_tm1, output_t)
                new_states = tree.map_structure(
                    lambda s, ns: jnp.where(is_masked, s, ns),
                    states,
                    new_states,
                )
                return (new_states, masked_outs)

            scan_xs = (inputs, mask)
        else:

            def _step(states, current_input):
                output_t, new_states = step_function(current_input, states)
                return new_states, output_t

            scan_xs = inputs
        if stateless_scope.in_stateless_scope():
            # Reuse the existing parent stateless scope.
            scope = contextlib.nullcontext()
        else:
            scope = stateless_scope.StatelessScope()
        with scope:
            # We must use a stateless scope because `scan` will involve
            # JAX tracing -- any variable update at this stage would
            # be a leak.
            new_states, outputs = lax.scan(
                f=_step,
                init=initial_states,
                xs=scan_xs,
                reverse=go_backwards,
            )
        if go_backwards:
            # `lax.scan(reverse=True)` emits outputs in input order; flip so
            # outputs follow processing order, matching the unrolled path.
            outputs = jnp.flip(outputs, axis=0)
        last_output = outputs[-1]

    if not time_major:
        outputs = tree.map_structure(swap_batch_timestep, outputs)
    return last_output, outputs, new_states
def cudnn_ok(*args, **kwargs):
    """The JAX backend never routes RNNs to a fused cuDNN kernel."""
    return False
def lstm(*args, **kwargs):
    """Fused LSTM is not implemented for the JAX backend."""
    raise NotImplementedError
def gru(*args, **kwargs):
    """Fused GRU is not implemented for the JAX backend."""
    raise NotImplementedError
def unstack(x, axis=0):
    """Split `x` along `axis` into a list of slices with that axis removed."""
    size = x.shape[axis]
    return [
        lax.index_in_dim(x, idx, axis, keepdims=False) for idx in range(size)
    ]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/core.py | keras/src/backend/jax/core.py | import jax
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
import ml_dtypes
import numpy as np
from jax import export as jax_export
from keras.src import tree
from keras.src.backend import config
from keras.src.backend.common import KerasVariable
from keras.src.backend.common import global_state
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.name_scope import name_scope as base_name_scope
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.jax import distribution_lib
# Backend capability flags.
SUPPORTS_SPARSE_TENSORS = True  # via jax.experimental.sparse (BCOO/JAXSparse)
SUPPORTS_RAGGED_TENSORS = False  # no ragged representation on this backend
IS_THREAD_SAFE = True
class JaxVariable(KerasVariable):
    """Keras variable backed by a JAX array, with an optional sharding layout."""

    def __init__(self, *args, layout=None, **kwargs):
        # Intercept layout parameter so that it is available
        # during initialization.
        self._layout = layout
        super().__init__(*args, **kwargs)

    def _initialize_layout(self):
        # Resolve a layout from the active distribution when none was given.
        # We can't import the keras/distribution/distribution_lib
        # due to circular dependency.
        distribution = global_state.get_global_attribute("distribution")
        if self._layout is None and distribution is not None:
            tensor_layout = distribution.get_variable_layout(self)
            from keras.src.distribution import TensorLayout

            if isinstance(tensor_layout, TensorLayout):
                self._layout = tensor_layout.backend_layout
            else:
                self._layout = tensor_layout

    def _initialize(self, value):
        # Note that variable.shape is needed by distribution_lib
        self._shape = self._validate_shape(value.shape)
        self._initialize_layout()
        self._direct_assign(value)

    def _initialize_with_initializer(self, initializer):
        self._initialize_layout()
        layout = self._layout
        shape = self._shape
        # For large sharded variables, run the initializer under jit with the
        # target sharding so each device only materializes its own shard.
        if should_shard_at_init(layout, shape):
            jitted_initializer = jax.jit(
                initializer.__call__,
                out_shardings=layout,
                static_argnames=["shape", "dtype"],
            )
            value = jitted_initializer(shape=self._shape, dtype=self._dtype)
            self._value = value
        else:
            super()._initialize_with_initializer(initializer)

    def _direct_assign(self, value):
        # Re-distribute incoming values when a layout is attached.
        if self._layout is not None:
            value = distribution_lib.distribute_variable(value, self._layout)
        self._value = value

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype, sparse=False)

    # Overload native accessor.
    def __jax_array__(self):
        return self.value
Variable = JaxVariable
if config.is_nnx_enabled():
    from flax import nnx

    class NnxVariable(JaxVariable, nnx.Variable):
        """Keras variable that is simultaneously a Flax NNX `nnx.Variable`."""

        def __init__(
            self,
            initializer,
            shape=None,
            dtype=None,
            trainable=True,
            autocast=True,
            aggregation="none",
            synchronization="auto",
            name=None,
            layout=None,
            mutable=None,
            **nnx_metadata,
        ):
            # Ensure 'mutable' is in nnx_metadata, but explicit 'mutable'
            # param takes precedence.
            nnx_metadata["mutable"] = trainable if mutable is None else mutable
            # First, initialize a basic nnx.Variable with a dummy value
            # This sets up the NNX variable structure
            if shape is None:
                dummy_value = jnp.array(0.0)
            else:
                dummy_value = jnp.zeros(shape, dtype=standardize_dtype(dtype))
            # Initialize nnx.Variable first
            nnx.Variable.__init__(self, value=dummy_value, **nnx_metadata)
            # Now we can safely set layout
            self._layout = layout
            # Initialize JaxVariable (which will call KerasVariable.__init__
            # and set up the real value).
            JaxVariable.__init__(
                self,
                initializer=initializer,
                shape=shape,
                dtype=dtype,
                trainable=trainable,
                autocast=autocast,
                aggregation=aggregation,
                synchronization=synchronization,
                name=name,
            )
            # The real value is now set in self._value, sync it to raw_value
            object.__setattr__(self, "raw_value", self._value)

        def _initialize_with_initializer(self, initializer):
            # Always materialize the initial value eagerly and assign it
            # (bypasses JaxVariable's sharded-init jit path).
            value = self._convert_to_tensor(
                initializer(self._shape, dtype=self._dtype)
            )
            self._initialize(value)

        @property
        def _value(self):
            # Keras's `_value` storage is aliased onto NNX's slotted
            # `raw_value` so both systems read the same array.
            if hasattr(self, "raw_value"):
                return self.raw_value
            return None

        @_value.setter
        def _value(self, new_keras_value):
            self._direct_assign(new_keras_value)

        def __getstate__(self):
            # Get the state from KerasVariable (attributes in __dict__)
            # KerasVariable does not have a custom __getstate__, so we mimic
            # default behavior.
            try:
                keras_state = KerasVariable.__getstate__(self)
            except AttributeError:
                keras_state = object.__getstate__(self)
            # Get the state from nnx.Variable
            nnx_specific_state = nnx.Variable.__getstate__(self)
            # Merge them. Keras state is primary. NNX specific state adds
            # to it.
            if "raw_value" in nnx_specific_state:
                keras_state["_value"] = nnx_specific_state["raw_value"]
            # Add NNX attributes that are not in Keras's __dict__
            if "_trace_state" in nnx_specific_state:
                keras_state["_trace_state"] = nnx_specific_state["_trace_state"]
            if "_var_metadata" in nnx_specific_state:
                keras_state["_var_metadata"] = nnx_specific_state[
                    "_var_metadata"
                ]
            # Remove elements that might be problematic or redundant if
            # nnx.Variable's __getstate__
            keras_state.pop("raw_value", None)
            return keras_state

        def __setstate__(self, state):
            # Separate nnx specific keys that we added if they are not part
            # of Keras __dict__ this __getstate__ puts them into the main
            # state dictionary.
            nnx_raw_value = state["_value"]  # This was raw_value
            nnx_trace_state = state.pop("_trace_state", None)
            nnx_var_metadata = state.pop("_var_metadata", None)
            # Populate the instance's __dict__ with the Keras attributes.
            self.__dict__.update(state)
            # restore the nnx.Variable specific slotted attributes.
            object.__setattr__(self, "raw_value", nnx_raw_value)
            if nnx_trace_state is not None:
                object.__setattr__(self, "_trace_state", nnx_trace_state)
            else:
                pass
            if nnx_var_metadata is not None:
                object.__setattr__(self, "_var_metadata", nnx_var_metadata)
            else:
                pass
            # Ensure Keras's self._value is also consistent with the
            # restored raw_value
            self._value = nnx_raw_value
            if hasattr(self, "_shape") and self._shape is not None:
                self._ndim = len(self._shape)
            else:
                # Fallback if shape isn't immediately available.
                self._ndim = len(self.raw_value.shape)

        def _direct_assign(self, value):
            # Apply JAX-specific distribution if layout is present
            if self._layout is not None:
                value = distribution_lib.distribute_variable(
                    value, self._layout
                )
            # Apply on_set_value hook if it exists
            if (
                hasattr(self, "_var_metadata")
                and "on_set_value" in self._var_metadata
            ):
                value = self._var_metadata["on_set_value"](self, value)
            # Set the value for both Keras and NNX parts
            # This ensures both systems see the same value
            object.__setattr__(self, "raw_value", value)

        @property
        def value(self):
            # Inside a stateless scope, a staged update may shadow the
            # stored value.
            if in_stateless_scope():
                scope = get_stateless_scope()
                stateless_value = scope.get_current_value(self)
                if stateless_value is not None:
                    return self._maybe_autocast(stateless_value)
            if not hasattr(self, "raw_value"):
                if self._initializer is not None:
                    self._initialize(
                        self._initializer(self.shape, dtype=self.dtype)
                    )
                else:
                    raise AttributeError(
                        "Variable is not properly initialized (raw_value "
                        "missing) and has no initializer."
                    )
            current_value = self.raw_value
            if (
                hasattr(self, "_var_metadata")
                and "on_get_value" in self._var_metadata
            ):
                current_value = self._var_metadata["on_get_value"](
                    self, current_value
                )
            return self._maybe_autocast(current_value)

    # When NNX is enabled, this class replaces JaxVariable as the backend
    # variable type.
    Variable = NnxVariable

    def _flatten_nnx_variable(variable):
        # Pytree leaves: just the raw array; everything else is aux data.
        children = (variable.raw_value,)
        # We copy __dict__ to avoid side effects
        keras_state = variable.__dict__.copy()
        # Remove elements that might be problematic or redundant if
        # nnx.Variable's __getstate__
        keras_state.pop("raw_value", None)
        aux_data = (
            variable._var_metadata,
            getattr(variable, "_trace_state", None),
            keras_state,
        )
        return children, aux_data

    def _unflatten_nnx_variable(aux_data, children):
        var_metadata, trace_state, keras_state = aux_data
        raw_value = children[0]
        # Create uninitialized instance
        variable = NnxVariable.__new__(NnxVariable)
        # Restore state
        variable._var_metadata = var_metadata
        if trace_state is not None:
            variable._trace_state = trace_state
        variable.__dict__.update(keras_state)
        variable.raw_value = raw_value
        return variable

    try:
        jax.tree_util.register_pytree_node(
            NnxVariable,
            _flatten_nnx_variable,
            _unflatten_nnx_variable,
        )
    except ValueError:
        # Already registered (e.g. on module re-import); safe to ignore.
        pass

    def __setattr__(self, name, value):
        # Mirror Keras attributes to _var_metadata to ensure persistence
        # if the Pytree registration is not respected by NNX.
        if (
            name != "_var_metadata"
            and name not in ("_raw_value", "_trace_state")
            and hasattr(self, "_var_metadata")
        ):
            self._var_metadata[name] = value
        object.__setattr__(self, name, value)

    # Patched in after class creation so it applies to all instances.
    NnxVariable.__setattr__ = __setattr__
def should_shard_at_init(init_layout, shape):
if not isinstance(init_layout, jax.sharding.NamedSharding):
return False
if all(dim is None for dim in init_layout.spec):
return False
size_threshold = 250 * 1024 * 1024
array_size = np.prod(shape) * 4
return array_size >= size_threshold
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
    """Convert `x` to a JAX tensor, optionally casting to `dtype`.

    Args:
        x: Array-like, backend `Variable`, JAX array, or JAX sparse tensor.
        dtype: Optional dtype name to standardize and cast to.
        sparse: If explicitly False, sparse inputs are densified; otherwise
            they pass through unchanged.
        ragged: Must be falsy; ragged tensors are unsupported on JAX.

    Raises:
        ValueError: If `ragged=True`.
    """
    if ragged:
        raise ValueError("`ragged=True` is not supported with jax backend")
    if dtype is not None:
        dtype = standardize_dtype(dtype)
    if isinstance(x, (jnp.ndarray, jax.Array)) and (
        dtype is None or x.dtype == dtype
    ):
        # Skip the conversion early if the instance is already a JAX array.
        # This is important in the multi-process context since jax.array(x) for
        # an existing distributed jax array will raise error.
        return x
    if isinstance(x, Variable):
        if dtype is not None and x.dtype != dtype:
            return x.value.astype(dtype)
        return x.value
    if isinstance(x, jax_sparse.JAXSparse):
        if sparse is not None and not sparse:
            x = x.todense()  # falls through to the dense conversion below
        elif dtype is not None and x.dtype != dtype:
            return x.astype(dtype)
        else:
            return x
    if not is_tensor(x) and standardize_dtype(dtype) == "bfloat16":
        # Can't create bfloat16 arrays on the fly (e.g. from a h5 Dataset).
        # Instead we convert "as is" (to stored dtype) and cast.
        return jnp.asarray(x).astype(dtype)
    return jnp.asarray(x, dtype=dtype)
def convert_to_numpy(x):
    """Materialize `x` as a NumPy array, densifying sparse inputs."""
    if isinstance(x, jax_sparse.JAXSparse):
        x = x.todense()
    # NumPy has no native bfloat16; preserve the dtype via ml_dtypes.
    keep_bf16 = is_tensor(x) and x.dtype == "bfloat16"
    target_dtype = ml_dtypes.bfloat16 if keep_bf16 else None
    return np.array(x, dtype=target_dtype)
def is_tensor(x):
    """True for native JAX arrays (dense or sparse), False otherwise."""
    return isinstance(x, (jnp.ndarray, jax_sparse.JAXSparse))
def shape(x):
    """Return the static shape of `x`."""
    return x.shape
def cast(x, dtype):
    """Convert `x` to a tensor of the given `dtype`."""
    return convert_to_tensor(x, dtype=dtype)
# Shape / dtype / sparseness inference util
def compute_output_spec(fn, *args, **kwargs):
    """Infer the output `KerasTensor` spec of `fn` without real computation.

    Symbolic arguments (`KerasTensor`s) are converted to
    `jax.ShapeDtypeStruct` (sparse ones to single-element BCOO tensors) and
    traced with `jax.eval_shape`; Python built-in scalars are passed through
    statically. The result mirrors `fn`'s output structure with
    `KerasTensor`s in place of arrays.
    """
    with StatelessScope(), SymbolicScope():
        built_in_types = (type(None), int, float, str, bool, complex, bytes)

        # First, separate symbolic args from other args
        static_args_idx = []
        static_args = []
        maybe_symbolic_args = []
        static_kwargs = {}
        maybe_symbolic_kwargs = {}
        for idx, arg in enumerate(args):
            if isinstance(arg, built_in_types):
                static_args_idx.append(idx)
                static_args.append(arg)
            else:
                maybe_symbolic_args.append(arg)
        maybe_symbolic_args = tuple(maybe_symbolic_args)
        for k, v in kwargs.items():
            if isinstance(v, built_in_types):
                static_kwargs[k] = v
            else:
                maybe_symbolic_kwargs[k] = v
        # Create a _DimExpr instance for one dimension by creating a symbolic
        # shape with one dimension and extracting it.
        #
        # We create a single dynamic dimension and reuse it instead of creating
        # N dynamic dimensions. This is for backwards compatibility. Previously
        # we would fill all dynamic dimensions with the same concrete value.
        # This can handle the case where there is an implicit assumption that
        # two dimensions are the same (e.g. square images).
        #
        # We add the constraint "dynamic_dimension>=2" to prevent JAX from
        # assuming that the dimension can be broadcastable or squeezable. It
        # removes this ambiguity.
        dynamic_dimension = jax_export.symbolic_shape(
            "(dynamic_dimension)",
            constraints=["dynamic_dimension>=2"],
        )[0]

        def convert_keras_tensor_to_jax(x):
            # Unknown (None) dims all map to the shared symbolic dimension.
            if isinstance(x, KerasTensor):
                shape = tuple(
                    [d if d is not None else dynamic_dimension for d in x.shape]
                )
                return jax.ShapeDtypeStruct(shape, dtype=x.dtype)
            return x

        def wrapped_fn(*args, **kwargs):
            # Turn inputs that are sparse to BCOO tensors
            def to_bcoo_if_sparse(x, maybe_symbolic_x):
                if (
                    isinstance(maybe_symbolic_x, KerasTensor)
                    and maybe_symbolic_x.sparse
                ):
                    return jax_sparse.BCOO.fromdense(x, nse=1)
                return x

            args, kwargs = tree.map_structure(
                to_bcoo_if_sparse,
                (args, kwargs),
                (maybe_symbolic_args, maybe_symbolic_kwargs),
            )
            # Re-interleave static and symbolic positional args in their
            # original order before calling `fn`.
            rec_args = []
            idx_static = 0
            idx_sym = 0
            i = 0
            while idx_static < len(static_args) or idx_sym < len(args):
                if i in static_args_idx:
                    rec_args.append(static_args[idx_static])
                    idx_static += 1
                else:
                    rec_args.append(args[idx_sym])
                    idx_sym += 1
                i += 1
            with StatelessScope():
                return fn(*rec_args, **kwargs, **static_kwargs)

        maybe_symbolic_args_jax, maybe_symbolic_kwargs_jax = tree.map_structure(
            convert_keras_tensor_to_jax,
            (maybe_symbolic_args, maybe_symbolic_kwargs),
        )
        jax_out = jax.eval_shape(
            wrapped_fn, *maybe_symbolic_args_jax, **maybe_symbolic_kwargs_jax
        )

        def convert_jax_spec_to_keras_tensor(x):
            # Symbolic (non-int) dims map back to None in the KerasTensor.
            if isinstance(x, jax.ShapeDtypeStruct):
                shape = tuple(
                    d if isinstance(d, int) else None for d in x.shape
                )
                return KerasTensor(shape, x.dtype)
            elif isinstance(x, jax_sparse.BCOO):
                shape = tuple(
                    d if isinstance(d, int) else None for d in x.shape
                )
                return KerasTensor(shape, x.dtype, sparse=True)
            return x

        return tree.map_structure(convert_jax_spec_to_keras_tensor, jax_out)
def cond(pred, true_fn, false_fn):
    """Branch on `pred`; both functions are traced by `jax.lax.cond`."""
    return jax.lax.cond(pred, true_fun=true_fn, false_fun=false_fn)
def vectorized_map(function, elements):
    """Map `function` over the leading axis of `elements` via `jax.vmap`."""
    batched = jax.vmap(function)
    return batched(elements)
def map(f, xs):
    """Apply `f` over the leading axis of `xs` sequentially (`jax.lax.map`)."""
    return jax.lax.map(f, xs)
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over the leading axis of `xs` while carrying state.

    Args:
        f: Callable `(carry, x) -> (carry, y)`.
        init: Initial carry value.
        xs: Values scanned over their leading axis, or `None` to run
            `length` steps with `x=None`.
        length: Number of steps when `xs` is `None`.
        reverse: Whether to scan from the end toward the start.
        unroll: Boolean, or a positive integer count of loop steps to
            unroll per iteration.

    Returns:
        A `(carry, ys)` tuple: the final carry and the stacked outputs.

    Raises:
        ValueError: If `unroll` is neither a boolean nor a positive integer.
    """
    # Booleans pass through untouched; any other value must be an int >= 1.
    # (Fixed error-message grammar: "a positive", not "an positive".)
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            raise ValueError(
                "`unroll` must be a positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    return jax.lax.scan(
        f, init=init, xs=xs, length=length, reverse=reverse, unroll=unroll
    )
def associative_scan(f, elems, reverse=False, axis=0):
    """Parallel prefix scan of the associative operator `f` along `axis`."""
    return jax.lax.associative_scan(f, elems, reverse=reverse, axis=axis)
def scatter(indices, values, shape):
    """Sum `values` into a zeros tensor of `shape`.

    The last axis of `indices` holds the coordinates of each update.
    """
    target = jnp.zeros(shape, values.dtype)
    coords = tuple(jnp.moveaxis(indices, -1, 0))
    return target.at[coords].add(values)
def scatter_update(inputs, indices, updates):
    """Overwrite `inputs` at coordinate rows given by `indices` with `updates`."""
    inputs = convert_to_tensor(inputs)
    coords = tuple(jnp.transpose(jnp.array(indices)))
    return inputs.at[coords].set(updates)
def slice(inputs, start_indices, shape):
    """Dynamic slice of `inputs`.

    A `shape` entry of -1 takes everything from the start index to the end
    of that axis.
    """
    resolved_shape = tuple(
        inputs.shape[axis] - start_indices[axis] if dim == -1 else dim
        for axis, dim in enumerate(shape)
    )
    return jax.lax.dynamic_slice(inputs, start_indices, resolved_shape)
def slice_update(inputs, start_indices, updates):
    """Write the block `updates` into `inputs` starting at `start_indices`."""
    return jax.lax.dynamic_update_slice(inputs, updates, start_indices)
def switch(index, branches, *operands):
    """Call `branches[index](*operands)` via `jax.lax.switch`."""
    return jax.lax.switch(index, branches, *operands)
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Run `body` on `loop_vars` while `cond` holds, optionally capped.

    `loop_vars` may be a single value or a tuple/list; the result mirrors
    the input structure.
    """
    unpack = isinstance(loop_vars, (tuple, list))
    state = tuple(loop_vars) if unpack else (loop_vars,)
    if maximum_iterations is None:

        def _cond(args):
            return cond(*args)

        def _body(args):
            result = body(*args)
            return tuple(result) if unpack else (result,)

    else:
        # Track the iteration count as a trailing hidden loop variable.
        state = state + (0,)

        def _cond(args):
            return cond(*args[:-1]) & (args[-1] < maximum_iterations)

        def _body(args):
            result = body(*args[:-1])
            result = tuple(result) if unpack else (result,)
            return result + (args[-1] + 1,)

    final_state = jax.lax.while_loop(_cond, _body, state)
    if maximum_iterations is not None:
        final_state = final_state[:-1]  # drop the hidden counter
    return final_state if unpack else final_state[0]
def fori_loop(lower, upper, body_fun, init_val):
    """Run `body_fun(i, val)` for `i` in `[lower, upper)` (`jax.lax.fori_loop`)."""
    return jax.lax.fori_loop(lower, upper, body_fun, init_val)
def stop_gradient(variable):
    """Return `variable` as a tensor through which no gradient flows."""
    value = variable.value if isinstance(variable, Variable) else variable
    return jax.lax.stop_gradient(value)
def unstack(x, num=None, axis=0):
    """Split `x` along `axis` into a list of arrays with that axis removed.

    `num` is accepted for API compatibility; the count is read from the
    static shape.
    """
    count = x.shape[axis]
    return [
        jax.lax.index_in_dim(x, i, axis, keepdims=False) for i in range(count)
    ]
def random_seed_dtype():
    """JAX PRNG seeds are unsigned 32-bit integers."""
    return "uint32"
def custom_gradient(fun):
    """Attach a user-defined gradient to `fun` via `jax.custom_gradient`."""
    return jax.custom_gradient(fun=fun)
def remat(f):
    """Implementation of rematerialization.

    Args:
        f: The function or operation to rematerialize.

    Returns:
        A function wrapping `f` that defines a custom gradient, which
        recomputes `f` on the backwards pass of a gradient call.
    """
    return jax.checkpoint(f)
class name_scope(base_name_scope):
    """Keras name scope that also enters the matching `jax.named_scope`."""

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        self._jax_name_scope = jax.named_scope(name)

    def __enter__(self):
        name_scope_stack = global_state.get_global_attribute(
            "name_scope_stack", default=[], set_to_default=True
        )
        # With deduplication, re-entering the same-named scope from the
        # same caller is a no-op (the JAX scope is not entered either).
        if self.deduplicate and name_scope_stack:
            parent_caller = name_scope_stack[-1].caller
            parent_name = name_scope_stack[-1].name
            if (
                self.caller is not None
                and self.caller is parent_caller
                and self.name == parent_name
            ):
                return self
        name_scope_stack.append(self)
        self._pop_on_exit = True
        self._jax_name_scope.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        super().__exit__(*args, **kwargs)
        # Only leave the JAX scope if __enter__ actually entered it.
        if self._pop_on_exit:
            self._jax_name_scope.__exit__(*args, **kwargs)
def device_scope(device_name):
    """Context manager placing computations on the requested device.

    Accepts either a string such as `"cpu:0"` / `"gpu:1"` or a
    `jax.Device` instance.

    Raises:
        ValueError: If `device_name` is neither a string nor a `jax.Device`.
    """
    if isinstance(device_name, str):
        # Normalize e.g. "GPU:1" before resolving the backend device.
        jax_device = distribution_lib._to_backend_device(device_name.lower())
    elif isinstance(device_name, jax.Device):
        jax_device = device_name
    else:
        raise ValueError(
            "Invalid value for argument `device_name`. "
            "Expected a string like 'gpu:0' or a `jax.Device` instance. "
            f"Received: device_name='{device_name}'"
        )
    return jax.default_device(jax_device)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/nn.py | keras/src/backend/jax/nn.py | import builtins
import inspect
import math
import jax
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from absl import logging
from jax import lax
from jax import nn as jnn
from jax.experimental.pallas.ops.tpu.splash_attention import (
splash_attention_kernel,
)
from jax.experimental.pallas.ops.tpu.splash_attention import (
splash_attention_mask,
)
from keras.src import backend
from keras.src.backend.common.backend_utils import (
compute_adaptive_pooling_window_sizes,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import convert_to_tensor
def relu(x):
    """Elementwise rectified linear unit."""
    return jnn.relu(convert_to_tensor(x))
def relu6(x):
    """Elementwise ReLU clipped at 6."""
    return jnn.relu6(convert_to_tensor(x))
def sigmoid(x):
    """Elementwise logistic sigmoid."""
    return jnn.sigmoid(convert_to_tensor(x))
def sparse_sigmoid(x):
    """Elementwise sparse sigmoid (`jax.nn.sparse_sigmoid`)."""
    return jnn.sparse_sigmoid(convert_to_tensor(x))
def tanh(x):
    """Elementwise hyperbolic tangent."""
    return jnn.tanh(convert_to_tensor(x))
def tanh_shrink(x):
    """Elementwise `x - tanh(x)`."""
    t = convert_to_tensor(x)
    return t - jnp.tanh(t)
def softplus(x):
    """Elementwise softplus: `log(1 + exp(x))`."""
    return jnn.softplus(convert_to_tensor(x))
def softsign(x):
    """Elementwise softsign (`jax.nn.soft_sign`)."""
    return jnn.soft_sign(convert_to_tensor(x))
def soft_shrink(x, threshold=0.5):
    """Shrink values toward zero, zeroing the band `[-threshold, threshold]`."""
    x = convert_to_tensor(x)
    shrunk_up = x - threshold
    shrunk_down = x + threshold
    return jnp.where(
        x > threshold,
        shrunk_up,
        jnp.where(x < -threshold, shrunk_down, 0.0),
    )
def sparse_plus(x):
    """Elementwise sparse-plus (`jax.nn.sparse_plus`)."""
    return jnn.sparse_plus(convert_to_tensor(x))
def silu(x):
    """Elementwise SiLU / swish: `x * sigmoid(x)`."""
    return jnn.silu(convert_to_tensor(x))
def squareplus(x, b=4):
    """Elementwise squareplus with hyperparameter `b`."""
    return jnn.squareplus(convert_to_tensor(x), b=b)
def log_sigmoid(x):
    """Elementwise log-sigmoid."""
    return jnn.log_sigmoid(convert_to_tensor(x))
def leaky_relu(x, negative_slope=0.2):
    """Elementwise leaky ReLU with the given negative-side slope."""
    return jnn.leaky_relu(convert_to_tensor(x), negative_slope=negative_slope)
def hard_sigmoid(x):
    """Elementwise piecewise-linear (hard) sigmoid."""
    return jnn.hard_sigmoid(convert_to_tensor(x))
def hard_silu(x):
    """Elementwise hard SiLU: `x * hard_sigmoid(x)`."""
    return jnn.hard_silu(convert_to_tensor(x))
def elu(x, alpha=1.0):
    """Elementwise exponential linear unit with scale `alpha`."""
    return jnn.elu(convert_to_tensor(x), alpha=alpha)
def selu(x):
    """Elementwise scaled exponential linear unit."""
    return jnn.selu(convert_to_tensor(x))
def gelu(x, approximate=True):
    """Elementwise GELU; `approximate` selects the tanh approximation."""
    return jnn.gelu(convert_to_tensor(x), approximate)
def celu(x, alpha=1.0):
    """Elementwise continuously-differentiable ELU with scale `alpha`."""
    return jnn.celu(convert_to_tensor(x), alpha=alpha)
def glu(x, axis=-1):
    """Gated linear unit: split `x` along `axis` and gate one half."""
    return jnn.glu(convert_to_tensor(x), axis=axis)
def hard_tanh(x):
    """Elementwise hard tanh (clipped linear)."""
    return jnn.hard_tanh(convert_to_tensor(x))
def hard_shrink(x, threshold=0.5):
    """Zero out values whose magnitude is at most `threshold`."""
    x = convert_to_tensor(x)
    return jnp.where(jnp.abs(x) > threshold, x, 0.0)
def threshold(x, threshold, default_value):
    """Keep values strictly above `threshold`; replace the rest with `default_value`."""
    x = convert_to_tensor(x)
    return jnp.where(x > threshold, x, default_value)
def softmax(x, axis=-1):
    """Softmax normalization along `axis`."""
    return jnn.softmax(convert_to_tensor(x), axis=axis)
def log_softmax(x, axis=-1):
    """Log of the softmax along `axis`."""
    return jnn.log_softmax(convert_to_tensor(x), axis=axis)
def sparsemax(x, axis=-1):
    """Sparsemax activation: a softmax-like projection with exact zeros.

    Projects logits onto the probability simplex along `axis`; entries whose
    logits are too small receive exactly zero probability.
    """
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(x)
    logits_sorted = -1.0 * jnp.sort(logits * -1.0, axis=axis)
    logits_cumsum = jnp.cumsum(logits_sorted, axis=axis)  # find cumulative sum
    r = jnp.arange(1, logits.shape[axis] + 1)  # Determine the sparsity
    r_shape = [1] * logits.ndim
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = r.reshape(r_shape)
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold
    k = jnp.sum(support, axis=axis, keepdims=True)
    logits_cumsum_safe = jnp.where(support, logits_cumsum, 0.0)
    tau = (jnp.sum(logits_cumsum_safe, axis=axis, keepdims=True) - 1) / k
    output = jnp.maximum(logits - tau, 0.0)
    return output
def _convert_to_spatial_operand(
x,
num_spatial_dims,
data_format="channels_last",
include_batch_and_channels=True,
):
# Helper function that converts an operand to a spatial operand.
x = (x,) * num_spatial_dims if isinstance(x, int) else x
if not include_batch_and_channels:
return x
if data_format == "channels_last":
x = (1,) + x + (1,)
else:
x = (1,) + (1,) + x
return x
def _pool(
inputs,
initial_value,
reduce_fn,
pool_size,
strides=None,
padding="valid",
):
"""Helper function to define pooling functions.
Args:
inputs: input data of shape `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
"""
if padding not in ("same", "valid"):
raise ValueError(
f"Invalid padding '{padding}', must be 'same' or 'valid'."
)
padding = padding.upper()
return lax.reduce_window(
inputs,
initial_value,
reduce_fn,
pool_size,
strides,
padding,
)
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """N-D max pooling over the spatial dimensions of `inputs`.

    Args:
        inputs: Tensor of rank N+2 (batch, N spatial dims, channel).
        pool_size: Int or sequence of N ints, the pooling window size.
        strides: Int or sequence of N ints; defaults to `pool_size`.
        padding: Either `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None for the
            global default.

    Returns:
        The max-pooled tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    # Bug fix: `pool_size` is already in full spatial-operand form here; it
    # must not be converted a second time (that would prepend/append extra
    # 1s and give `lax.reduce_window` a strides tuple of the wrong rank).
    if strides is None:
        strides = pool_size
    else:
        strides = _convert_to_spatial_operand(
            strides, num_spatial_dims, data_format
        )
    return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding)
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """N-D average pooling over the spatial dimensions of `inputs`.

    Args:
        inputs: Tensor of rank N+2 (batch, N spatial dims, channel).
        pool_size: Int or sequence of N ints, the pooling window size.
        strides: Int or sequence of N ints; defaults to `pool_size`.
        padding: Either `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None for the
            global default.

    Returns:
        The average-pooled tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    # Bug fix: `pool_size` is already in full spatial-operand form here; it
    # must not be converted a second time (that would prepend/append extra
    # 1s and give `lax.reduce_window` a strides tuple of the wrong rank).
    if strides is None:
        strides = pool_size
    else:
        strides = _convert_to_spatial_operand(
            strides, num_spatial_dims, data_format
        )
    pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
    if padding == "valid":
        # Every window is full, so a constant divisor suffices and we avoid
        # the extra reduce_window.
        return pooled / math.prod(pool_size)
    # With "same" padding, edge windows cover fewer valid inputs. Count the
    # number of valid entries at each output position by pooling a tensor of
    # ones, and divide by that. Assumes that any two arrays of same shape
    # will be padded the same. Avoid broadcasting on axes where pooling is
    # skipped.
    shape = [
        (a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
    ]
    window_counts = _pool(
        jnp.ones(shape, inputs.dtype),
        0.0,
        lax.add,
        pool_size,
        strides,
        padding,
    )
    return pooled / window_counts
def _compute_adaptive_pooling_gather_indices(
input_dim, output_size, big_window
):
"""Compute gather indices for Two-Pool Gather method."""
window_starts = jnp.floor(
(jnp.arange(output_size) * input_dim) / output_size
).astype(jnp.int32)
window_ends = jnp.ceil(
(jnp.arange(1, output_size + 1) * input_dim) / output_size
).astype(jnp.int32)
window_sizes = window_ends - window_starts
is_big = window_sizes == big_window
small_window = big_window - 1
small_len = input_dim - small_window + 1
small_indices = window_starts
big_indices = window_starts + small_len
gather = jnp.where(is_big, big_indices, small_indices)
return gather.astype(jnp.int32)
def _adaptive_average_pool1d(inputs, output_size, data_format="channels_first"):
    """Adaptive 1D average pooling via the Two-Pool Gather method.

    Adaptive windows take only two sizes (small, big): pool once with each,
    concatenate along the length axis, then gather the correct window for
    every output position.
    """
    if isinstance(output_size, int):
        output_size = (output_size,)
    if data_format == "channels_first":
        inputs = jnp.transpose(inputs, (0, 2, 1))  # NCL → NLC
    n, l, c = inputs.shape
    out_l = output_size[0]
    small, big = compute_adaptive_pooling_window_sizes(l, out_l)
    gather = _compute_adaptive_pooling_gather_indices(l, out_l, big)
    # Sum-pool then divide by the window size to get the average.
    small_pool = (
        lax.reduce_window(
            inputs, 0.0, lax.add, (1, small, 1), (1, 1, 1), "valid"
        )
        / small
    )
    big_pool = (
        lax.reduce_window(inputs, 0.0, lax.add, (1, big, 1), (1, 1, 1), "valid")
        / big
    )
    combined = jnp.concatenate([small_pool, big_pool], axis=1)
    out = jnp.take(combined, gather, axis=1)
    if data_format == "channels_first":
        out = jnp.transpose(out, (0, 2, 1))
    return out
def _adaptive_max_pool1d(inputs, output_size, data_format="channels_first"):
    """Adaptive 1D max pooling via the Two-Pool Gather method.

    Same structure as `_adaptive_average_pool1d`, but with a max reduction.
    """
    if isinstance(output_size, int):
        output_size = (output_size,)
    if data_format == "channels_first":
        inputs = jnp.transpose(inputs, (0, 2, 1))  # NCL → NLC
    n, l, c = inputs.shape
    out_l = output_size[0]
    small, big = compute_adaptive_pooling_window_sizes(l, out_l)
    gather = _compute_adaptive_pooling_gather_indices(l, out_l, big)
    small_pool = lax.reduce_window(
        inputs, -jnp.inf, lax.max, (1, small, 1), (1, 1, 1), "valid"
    )
    big_pool = lax.reduce_window(
        inputs, -jnp.inf, lax.max, (1, big, 1), (1, 1, 1), "valid"
    )
    combined = jnp.concatenate([small_pool, big_pool], axis=1)
    out = jnp.take(combined, gather, axis=1)
    if data_format == "channels_first":
        out = jnp.transpose(out, (0, 2, 1))
    return out
def _adaptive_average_pool2d(inputs, output_size, data_format="channels_first"):
    """Adaptive 2D average pooling, applied separably (height then width)."""
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    if data_format == "channels_first":
        inputs = jnp.transpose(inputs, (0, 2, 3, 1))  # NCHW → NHWC
    n, h, w, c = inputs.shape
    out_h, out_w = output_size
    small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
    gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
    small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
    gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
    # Height pass: average-pool with both window sizes, then gather.
    small_h_pool = (
        lax.reduce_window(
            inputs, 0.0, lax.add, (1, small_h, 1, 1), (1, 1, 1, 1), "valid"
        )
        / small_h
    )
    big_h_pool = (
        lax.reduce_window(
            inputs, 0.0, lax.add, (1, big_h, 1, 1), (1, 1, 1, 1), "valid"
        )
        / big_h
    )
    combined_h = jnp.concatenate([small_h_pool, big_h_pool], axis=1)
    pooled_h = jnp.take(combined_h, gather_h, axis=1)
    # Width pass on the height-pooled result.
    small_w_pool = (
        lax.reduce_window(
            pooled_h, 0.0, lax.add, (1, 1, small_w, 1), (1, 1, 1, 1), "valid"
        )
        / small_w
    )
    big_w_pool = (
        lax.reduce_window(
            pooled_h, 0.0, lax.add, (1, 1, big_w, 1), (1, 1, 1, 1), "valid"
        )
        / big_w
    )
    combined_w = jnp.concatenate([small_w_pool, big_w_pool], axis=2)
    out = jnp.take(combined_w, gather_w, axis=2)
    if data_format == "channels_first":
        out = jnp.transpose(out, (0, 3, 1, 2))
    return out
def _adaptive_max_pool2d(inputs, output_size, data_format="channels_first"):
if isinstance(output_size, int):
output_size = (output_size, output_size)
if data_format == "channels_first":
inputs = jnp.transpose(inputs, (0, 2, 3, 1))
n, h, w, c = inputs.shape
out_h, out_w = output_size
small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
small_h_pool = lax.reduce_window(
inputs, -jnp.inf, lax.max, (1, small_h, 1, 1), (1, 1, 1, 1), "valid"
)
big_h_pool = lax.reduce_window(
inputs, -jnp.inf, lax.max, (1, big_h, 1, 1), (1, 1, 1, 1), "valid"
)
combined_h = jnp.concatenate([small_h_pool, big_h_pool], axis=1)
pooled_h = jnp.take(combined_h, gather_h, axis=1)
small_w_pool = lax.reduce_window(
pooled_h, -jnp.inf, lax.max, (1, 1, small_w, 1), (1, 1, 1, 1), "valid"
)
big_w_pool = lax.reduce_window(
pooled_h, -jnp.inf, lax.max, (1, 1, big_w, 1), (1, 1, 1, 1), "valid"
)
combined_w = jnp.concatenate([small_w_pool, big_w_pool], axis=2)
out = jnp.take(combined_w, gather_w, axis=2)
if data_format == "channels_first":
out = jnp.transpose(out, (0, 3, 1, 2))
return out
def _adaptive_average_pool3d(inputs, output_size, data_format="channels_first"):
if isinstance(output_size, int):
output_size = (output_size, output_size, output_size)
if data_format == "channels_first":
inputs = jnp.transpose(inputs, (0, 2, 3, 4, 1))
n, d, h, w, c = inputs.shape
out_d, out_h, out_w = output_size
small_d, big_d = compute_adaptive_pooling_window_sizes(d, out_d)
gather_d = _compute_adaptive_pooling_gather_indices(d, out_d, big_d)
small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
small_d_pool = (
lax.reduce_window(
inputs,
0.0,
lax.add,
(1, small_d, 1, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
/ small_d
)
big_d_pool = (
lax.reduce_window(
inputs, 0.0, lax.add, (1, big_d, 1, 1, 1), (1, 1, 1, 1, 1), "valid"
)
/ big_d
)
combined_d = jnp.concatenate([small_d_pool, big_d_pool], axis=1)
pooled_d = jnp.take(combined_d, gather_d, axis=1)
small_h_pool = (
lax.reduce_window(
pooled_d,
0.0,
lax.add,
(1, 1, small_h, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
/ small_h
)
big_h_pool = (
lax.reduce_window(
pooled_d,
0.0,
lax.add,
(1, 1, big_h, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
/ big_h
)
combined_h = jnp.concatenate([small_h_pool, big_h_pool], axis=2)
pooled_h = jnp.take(combined_h, gather_h, axis=2)
small_w_pool = (
lax.reduce_window(
pooled_h,
0.0,
lax.add,
(1, 1, 1, small_w, 1),
(1, 1, 1, 1, 1),
"valid",
)
/ small_w
)
big_w_pool = (
lax.reduce_window(
pooled_h,
0.0,
lax.add,
(1, 1, 1, big_w, 1),
(1, 1, 1, 1, 1),
"valid",
)
/ big_w
)
combined_w = jnp.concatenate([small_w_pool, big_w_pool], axis=3)
out = jnp.take(combined_w, gather_w, axis=3)
if data_format == "channels_first":
out = jnp.transpose(out, (0, 4, 1, 2, 3))
return out
def _adaptive_max_pool3d(inputs, output_size, data_format="channels_first"):
if isinstance(output_size, int):
output_size = (output_size, output_size, output_size)
if data_format == "channels_first":
inputs = jnp.transpose(inputs, (0, 2, 3, 4, 1))
n, d, h, w, c = inputs.shape
out_d, out_h, out_w = output_size
small_d, big_d = compute_adaptive_pooling_window_sizes(d, out_d)
gather_d = _compute_adaptive_pooling_gather_indices(d, out_d, big_d)
small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
small_d_pool = lax.reduce_window(
inputs,
-jnp.inf,
lax.max,
(1, small_d, 1, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
big_d_pool = lax.reduce_window(
inputs, -jnp.inf, lax.max, (1, big_d, 1, 1, 1), (1, 1, 1, 1, 1), "valid"
)
combined_d = jnp.concatenate([small_d_pool, big_d_pool], axis=1)
pooled_d = jnp.take(combined_d, gather_d, axis=1)
small_h_pool = lax.reduce_window(
pooled_d,
-jnp.inf,
lax.max,
(1, 1, small_h, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
big_h_pool = lax.reduce_window(
pooled_d,
-jnp.inf,
lax.max,
(1, 1, big_h, 1, 1),
(1, 1, 1, 1, 1),
"valid",
)
combined_h = jnp.concatenate([small_h_pool, big_h_pool], axis=2)
pooled_h = jnp.take(combined_h, gather_h, axis=2)
small_w_pool = lax.reduce_window(
pooled_h,
-jnp.inf,
lax.max,
(1, 1, 1, small_w, 1),
(1, 1, 1, 1, 1),
"valid",
)
big_w_pool = lax.reduce_window(
pooled_h,
-jnp.inf,
lax.max,
(1, 1, 1, big_w, 1),
(1, 1, 1, 1, 1),
"valid",
)
combined_w = jnp.concatenate([small_w_pool, big_w_pool], axis=3)
out = jnp.take(combined_w, gather_w, axis=3)
if data_format == "channels_first":
out = jnp.transpose(out, (0, 4, 1, 2, 3))
return out
def adaptive_average_pool(inputs, output_size, data_format=None):
data_format = backend.standardize_data_format(data_format)
dims = inputs.ndim - 2
if dims == 1:
return _adaptive_average_pool1d(inputs, output_size, data_format)
if dims == 2:
return _adaptive_average_pool2d(inputs, output_size, data_format)
if dims == 3:
return _adaptive_average_pool3d(inputs, output_size, data_format)
raise ValueError("adaptive_average_pool supports only 1D/2D/3D inputs")
def adaptive_max_pool(inputs, output_size, data_format=None):
data_format = backend.standardize_data_format(data_format)
dims = inputs.ndim - 2
if dims == 1:
return _adaptive_max_pool1d(inputs, output_size, data_format)
if dims == 2:
return _adaptive_max_pool2d(inputs, output_size, data_format)
if dims == 3:
return _adaptive_max_pool3d(inputs, output_size, data_format)
raise ValueError("adaptive_max_pool supports only 1D/2D/3D inputs")
def _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format="channels_last",
transpose=False,
):
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if data_format == "channels_last":
spatial_dims = tuple(range(1, num_dims - 1))
inputs_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
inputs_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
)
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = backend.standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
if data_format == "channels_last":
channels = inputs.shape[-1]
else:
channels = inputs.shape[1]
kernel_in_channels = kernel.shape[-2]
if channels % kernel_in_channels > 0:
raise ValueError(
"The number of input channels must be evenly divisible by "
f"kernel's in_channels. Received input channels {channels} and "
f"kernel in_channels {kernel_in_channels}. "
)
feature_group_count = channels // kernel_in_channels
kernel = convert_to_tensor(kernel)
inputs = convert_to_tensor(inputs, dtype=kernel.dtype)
result = jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
if result.size == 0:
raise ValueError(
"The convolution operation resulted in an empty output. "
"This can happen if the input is too small for the given "
"kernel size, strides, dilation rate, and padding mode. "
"Please check the input shape and convolution parameters."
)
return result
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = backend.standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
feature_group_count = (
inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
)
kernel = convert_to_tensor(kernel)
inputs = convert_to_tensor(inputs)
kernel = jnp.reshape(
kernel,
kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
)
return jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = backend.standardize_data_format(data_format)
depthwise_conv_output = depthwise_conv(
inputs,
depthwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
return conv(
depthwise_conv_output,
pointwise_kernel,
strides=1,
padding="valid",
data_format=data_format,
dilation_rate=dilation_rate,
)
def conv_transpose(
inputs,
kernel,
strides=1,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
data_format = backend.standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
padding_values = compute_conv_transpose_padding_args_for_jax(
input_shape=inputs.shape,
kernel_shape=kernel.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
return jax.lax.conv_transpose(
inputs,
kernel,
strides,
padding=padding_values,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
transpose_kernel=True,
)
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
x = convert_to_tensor(x)
if sparse:
if axis < 0:
axis = axis + len(x.shape) + 1
if dtype is None:
dtype = "float32"
# We deal with negative inputs by having zeros in the output although
# it's useless. It makes shapes static.
values = jnp.greater_equal(jnp.ravel(x), 0).astype(dtype)
values_count = values.shape[0]
indices = [jnp.arange(dim) for dim in x.shape]
indices = list(jnp.meshgrid(*indices, indexing="ij"))
indices.insert(axis, jnp.maximum(x, 0)) # Deal with negative indices
indices = [a.reshape(values_count, 1).astype("int32") for a in indices]
indices = jnp.concatenate(indices, axis=1)
shape = list(x.shape)
shape.insert(axis, num_classes)
shape = tuple(shape)
return jax_sparse.BCOO(
(values, indices),
shape=shape,
indices_sorted=True,
unique_indices=True,
)
return jnn.one_hot(x, num_classes, axis=axis, dtype=dtype)
def multi_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
x = convert_to_tensor(x)
reduction_axis = 1 if len(x.shape) > 1 else 0
if sparse:
result = one_hot(
x, num_classes, axis=axis, dtype="int32", sparse=sparse
)
# JAX's BCOO does not support max reduction, use sum and compare with 0.
result = jax_sparse.bcoo_reduce_sum(result, axes=(reduction_axis,))
result = jax_sparse.bcoo_sum_duplicates(result)
values = jnp.greater_equal(result.data, 0).astype(dtype)
return jax_sparse.BCOO(
(values, result.indices),
shape=result.shape,
indices_sorted=True,
unique_indices=True,
)
return jnp.max(
one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
axis=reduction_axis,
)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = jnp.array(target)
output = jnp.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = jax.nn.log_softmax(output, axis=axis)
else:
output = output / jnp.sum(output, axis, keepdims=True)
output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
log_prob = jnp.log(output)
return -jnp.sum(target * log_prob, axis=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = jnp.array(target, dtype="int32")
output = jnp.array(output)
if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
target = jnp.squeeze(target, axis=-1)
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
if target.shape != output.shape[:-1]:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = jax.nn.log_softmax(output, axis=axis)
else:
output = output / jnp.sum(output, axis, keepdims=True)
output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
log_prob = jnp.log(output)
target = jnn.one_hot(target, output.shape[axis], axis=axis)
return -jnp.sum(target * log_prob, axis=axis)
def binary_crossentropy(target, output, from_logits=False):
target = jnp.array(target)
output = jnp.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_logits = jax.nn.log_sigmoid(output)
log_neg_logits = jax.nn.log_sigmoid(-output)
return -1.0 * target * log_logits - (1.0 - target) * log_neg_logits
output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
bce = target * jnp.log(output)
bce += (1.0 - target) * jnp.log(1.0 - output)
return -bce
def moments(x, axes, keepdims=False, synchronized=False):
if synchronized:
raise NotImplementedError(
"Argument synchronized=True is not supported with JAX."
)
# The dynamic range of float16 is too limited for statistics. As a
# workaround, we simply perform the operations on float32 and convert back
# to float16
need_cast = False
ori_dtype = backend.standardize_dtype(x.dtype)
if ori_dtype in ("float16", "bfloat16"):
need_cast = True
x = cast(x, "float32")
mean = jnp.mean(x, axes, keepdims=True)
variance = jnp.var(x, axis=axes, keepdims=True)
if not keepdims:
mean = jnp.squeeze(mean, axes)
variance = jnp.squeeze(variance, axes)
if need_cast:
# avoid overflow and underflow when casting from float16 to float32
mean = jnp.clip(
mean, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max
)
variance = jnp.clip(
variance, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max
)
mean = cast(mean, ori_dtype)
variance = cast(variance, ori_dtype)
return mean, variance
def batch_normalization(
x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
shape = [1] * len(x.shape)
shape[axis] = mean.shape[0]
mean = jnp.reshape(mean, shape)
variance = jnp.reshape(variance, shape)
inv = jax.lax.rsqrt(variance + epsilon)
if scale is not None:
scale = jnp.reshape(scale, shape)
inv = inv * scale
res = -mean * inv
if offset is not None:
offset = jnp.reshape(offset, shape)
res = res + offset
return jnp.add(x * inv, res)
def ctc_loss(target, output, target_length, output_length, mask_index=0):
# Ref: https://github.com/google-deepmind/optax
# optax.ctc_loss_with_forward_probs
target = convert_to_tensor(target, dtype="int32")
output = convert_to_tensor(output)
target_length = convert_to_tensor(target_length, "int32")
output_length = convert_to_tensor(output_length, "int32")
batch_size, max_input_length, num_classes = output.shape
batch_size, max_label_length = target.shape
log_epsilon = -1e5
# Ensure that the dtype promotion behavior matches that of `tf.nn.ctc_loss`
dtype = backend.result_type(output.dtype, "float32")
output = cast(output, dtype)
def _lengths_to_paddings(lengths, max_length):
indices = jnp.arange(max_length).reshape(
(1,) * lengths.ndim + (max_length,)
)
lengths = jnp.expand_dims(lengths, axis=-1)
elem_valid = indices < lengths
return jnp.logical_not(elem_valid)
target_paddings = _lengths_to_paddings(target_length, max_label_length)
output_paddings = _lengths_to_paddings(output_length, max_input_length)
target_paddings = target_paddings.astype(output.dtype)
output_paddings = output_paddings.astype(output.dtype)
logprobs = jnn.log_softmax(output)
label_lengths = max_label_length - jnp.sum(target_paddings, axis=1).astype(
jnp.int32
)
# repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
repeat = (target[:, :-1] == target[:, 1:]).astype(jnp.float32)
repeat = jnp.pad(repeat, ((0, 0), (0, 1)))
logprobs_phi = logprobs[:, :, mask_index : mask_index + 1] # [B, T, 1]
logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2)) # [T, B, 1]
_one_hot = jax.nn.one_hot(
target, num_classes=num_classes, dtype=logprobs.dtype
) # [B, N, K]
logprobs_emit = jnp.einsum("btk,bnk->btn", logprobs, _one_hot)
logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2)) # [T, B, N]
# [B, N]
logalpha_phi_init = (
jnp.ones((batch_size, max_label_length + 1), dtype=output.dtype)
* log_epsilon
)
logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/linalg.py | keras/src/backend/jax/linalg.py | import jax
import jax.numpy as jnp
import jax.scipy as jsp
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import convert_to_tensor
def cholesky(a, upper=False):
out = jnp.linalg.cholesky(a, upper=upper)
try:
# In eager mode, raise for nan to
# achieve behavior consistency with numpy
if jnp.any(jnp.isnan(out)):
raise ValueError(
"Cholesky decomposition failed. "
"The input might not be a valid "
"positive definite matrix."
)
except jax.errors.TracerBoolConversionError:
# Cannot raise for nan in tracing mode
pass
return out
def cholesky_inverse(a, upper=False):
identity = jnp.eye(a.shape[-1], dtype=a.dtype)
inv_chol = solve_triangular(a, identity, lower=not upper)
if upper:
a_inv = jnp.matmul(inv_chol, jnp.transpose(inv_chol))
else:
a_inv = jnp.matmul(jnp.transpose(inv_chol), inv_chol)
return a_inv
def det(a):
return jnp.linalg.det(a)
def eig(x):
return jnp.linalg.eig(x)
def eigh(x):
return jnp.linalg.eigh(x)
def inv(a):
return jnp.linalg.inv(a)
def lu_factor(x):
lu_factor_fn = jsp.linalg.lu_factor
if x.ndim > 2:
for i in range(x.ndim - 2):
lu_factor_fn = jax.vmap(lu_factor_fn)
return lu_factor_fn(x)
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return jnp.linalg.qr(x, mode=mode)
def solve(a, b):
return jnp.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
return jsp.linalg.solve_triangular(a, b, lower=lower)
def svd(x, full_matrices=True, compute_uv=True):
return jnp.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return jnp.linalg.lstsq(a, b, rcond=rcond)[0]
def jvp(fun, primals, tangents, has_aux=False):
return jax.jvp(fun, primals, tangents, has_aux=has_aux)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/tensorboard.py | keras/src/backend/jax/tensorboard.py | from keras.src.utils.module_utils import jax
def start_trace(logdir):
if logdir:
jax.profiler.start_trace(logdir)
def stop_trace(save):
if save:
jax.profiler.stop_trace()
def start_batch_trace(batch):
batch_trace_context = jax.profiler.TraceAnnotation(
f"Profiled batch {batch}"
)
batch_trace_context.__enter__()
return batch_trace_context
def stop_batch_trace(batch_trace_context):
batch_trace_context.__exit__(None, None, None)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/numpy.py | keras/src/backend/jax/numpy.py | import builtins
import math
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from jax import export as jax_export
from keras.src.backend import config
from keras.src.backend.common import dtypes
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.jax import nn
from keras.src.backend.jax import sparse
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import convert_to_tensor
def rot90(array, k=1, axes=(0, 1)):
"""Rotate an array by 90 degrees in the specified plane."""
if array.ndim < 2:
raise ValueError(
f"Input array must have at least 2 dimensions. "
f"Received: array.ndim={array.ndim}"
)
if len(axes) != 2 or axes[0] == axes[1]:
raise ValueError(
f"Invalid axes: {axes}. Axes must be a tuple of "
"two different dimensions."
)
return jnp.rot90(array, k=k, axes=axes)
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def add(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.add(x1, x2)
def bartlett(x):
x = convert_to_tensor(x)
return cast(jnp.bartlett(x), config.floatx())
def hamming(x):
x = convert_to_tensor(x)
return cast(jnp.hamming(x), config.floatx())
def hanning(x):
x = convert_to_tensor(x)
return cast(jnp.hanning(x), config.floatx())
def heaviside(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.heaviside(x1, x2)
def hypot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.hypot(x1, x2)
def kaiser(x, beta):
x = convert_to_tensor(x)
return cast(jnp.kaiser(x, beta), config.floatx())
def bincount(x, weights=None, minlength=0, sparse=False):
# Note: bincount is never traceable / jittable because the output shape
# depends on the values in x.
if sparse or isinstance(x, jax_sparse.BCOO):
if isinstance(x, jax_sparse.BCOO):
if weights is not None:
if not isinstance(weights, jax_sparse.BCOO):
raise ValueError("`x` and `weights` must both be BCOOs")
if x.indices is not weights.indices:
# This test works in eager mode only
if not jnp.all(jnp.equal(x.indices, weights.indices)):
raise ValueError(
"`x` and `weights` BCOOs must have the same indices"
)
weights = weights.data
x = x.data
reduction_axis = 1 if len(x.shape) > 1 else 0
maxlength = jnp.maximum(jnp.max(x) + 1, minlength)
one_hot_encoding = nn.one_hot(x, maxlength, sparse=True)
if weights is not None:
expanded_weights = jnp.expand_dims(weights, reduction_axis + 1)
one_hot_encoding = one_hot_encoding * expanded_weights
outputs = jax_sparse.bcoo_reduce_sum(
one_hot_encoding,
axes=(reduction_axis,),
)
return outputs
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return jnp.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return jnp.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return jnp.stack(bincounts)
return jnp.bincount(x, weights=weights, minlength=minlength)
def einsum(subscripts, *operands, **kwargs):
operands = [convert_to_tensor(x) for x in operands]
# When all operands are of int8, specifying `preferred_element_type` as
# int32 to enable hardware-accelerated einsum
dtypes = list(set(standardize_dtype(x.dtype) for x in operands))
if len(dtypes) == 1 and dtypes[0] == "int8":
preferred_element_type = "int32"
else:
preferred_element_type = None
kwargs["preferred_element_type"] = preferred_element_type
return jnp.einsum(subscripts, *operands, **kwargs)
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def subtract(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.subtract(x1, x2)
def matmul(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
# When both x1 and x2 are of int8, specifying `preferred_element_type` as
# int32 to enable hardware-accelerated matmul
x1_dtype = standardize_dtype(x1.dtype)
x2_dtype = standardize_dtype(x2.dtype)
if x1_dtype == "int8" and x2_dtype == "int8":
preferred_element_type = "int32"
else:
preferred_element_type = None
if isinstance(x1, jax_sparse.JAXSparse) or isinstance(
x2, jax_sparse.JAXSparse
):
if not hasattr(matmul, "sparse_matmul"):
matmul.sparse_matmul = jax_sparse.sparsify(jnp.matmul)
if isinstance(x1, jax_sparse.BCOO):
x1 = jax_sparse.bcoo_update_layout(
x1, n_batch=len(x1.shape) - 2, on_inefficient="warn"
)
if isinstance(x2, jax_sparse.BCOO):
x2 = jax_sparse.bcoo_update_layout(
x2, n_batch=len(x2.shape) - 2, on_inefficient="warn"
)
return matmul.sparse_matmul(
x1, x2, preferred_element_type=preferred_element_type
)
return jnp.matmul(x1, x2, preferred_element_type=preferred_element_type)
def multiply(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
if isinstance(x1, jax_sparse.BCOO):
if isinstance(x2, jax_sparse.BCOO):
# x1 is sparse, x2 is sparse.
if x1.indices is x2.indices:
# `bcoo_multiply_sparse` will not detect that the indices are
# the same, optimize this case here.
if not x1.unique_indices:
x1 = jax_sparse.bcoo_sum_duplicates(x1)
x2 = jax_sparse.bcoo_sum_duplicates(x2)
return jax_sparse.BCOO(
(jnp.multiply(x1.data, x2.data), x1.indices),
shape=x1.shape,
indices_sorted=True,
unique_indices=True,
)
else:
return jax_sparse.bcoo_multiply_sparse(x1, x2)
else:
# x1 is sparse, x2 is dense.
out_data = jax_sparse.bcoo_multiply_dense(x1, x2)
return jax_sparse.BCOO(
(out_data, x1.indices),
shape=x1.shape,
indices_sorted=x1.indices_sorted,
unique_indices=x1.unique_indices,
)
elif isinstance(x2, jax_sparse.BCOO):
# x1 is dense, x2 is sparse.
out_data = jax_sparse.bcoo_multiply_dense(x2, x1)
return jax_sparse.BCOO(
(out_data, x2.indices),
shape=x2.shape,
indices_sorted=x2.indices_sorted,
unique_indices=x2.unique_indices,
)
return jnp.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# `jnp.mean` does not handle low precision (e.g., float16) overflow
# correctly, so we compute with float32 and cast back to the original type.
compute_dtype = dtypes.result_type(x.dtype, "float32")
if "int" in ori_dtype or ori_dtype == "bool":
result_dtype = compute_dtype
else:
result_dtype = ori_dtype
if isinstance(x, jax_sparse.BCOO):
if axis is None:
axis = tuple(range(len(x.shape)))
(
canonical_axis,
keep_dims_shape,
broadcast_dimensions,
) = sparse.axis_shape_dims_for_broadcast_in_dim(
axis, x.shape, insert_dims=False
)
divisor = math.prod(x.shape[i] for i in canonical_axis)
output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis)
output = jax_sparse.BCOO(
(output.data.astype(result_dtype) / divisor, output.indices),
shape=output.shape,
)
if keepdims:
# `bcoo_reduce_sum` does not support keepdims, neither does
# sparsify(jnp.sum), so we recreate the empty dimensions.
output = jax_sparse.bcoo_broadcast_in_dim(
output,
shape=keep_dims_shape,
broadcast_dimensions=broadcast_dimensions,
)
return output
else:
output = jnp.mean(x, axis=axis, keepdims=keepdims, dtype=compute_dtype)
return cast(output, result_dtype)
def max(x, axis=None, keepdims=False, initial=None):
x = convert_to_tensor(x)
return jnp.max(x, axis=axis, keepdims=keepdims, initial=initial)
def ones(shape, dtype=None):
dtype = dtype or config.floatx()
return jnp.ones(shape, dtype=dtype)
def zeros(shape, dtype=None):
dtype = dtype or config.floatx()
return jnp.zeros(shape, dtype=dtype)
@sparse.elementwise_unary(linear=False)
def absolute(x):
x = convert_to_tensor(x)
return jnp.absolute(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
return jnp.all(x, axis=axis, keepdims=keepdims)
def angle(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.angle(x)
def any(x, axis=None, keepdims=False):
return jnp.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
return jnp.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
return jnp.amin(x, axis=axis, keepdims=keepdims)
def append(x1, x2, axis=None):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.append(x1, x2, axis=axis)
def arange(start, stop=None, step=None, dtype=None):
def get_dtype(x):
if hasattr(x, "dtype"):
return x.dtype
if jax_export.is_symbolic_dim(x):
return int
return type(x)
if dtype is None:
dtypes_to_resolve = [get_dtype(start)]
if stop is not None:
dtypes_to_resolve.append(get_dtype(stop))
if step is not None:
dtypes_to_resolve.append(get_dtype(step))
dtype = dtypes.result_type(*dtypes_to_resolve)
dtype = standardize_dtype(dtype)
return jnp.arange(start, stop, step=step, dtype=dtype)
@sparse.densifying_unary
def arccos(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arccos(x)
@sparse.densifying_unary
def arccosh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arccosh(x)
@sparse.elementwise_unary(linear=False)
def arcsin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arcsin(x)
@sparse.elementwise_unary(linear=False)
def arcsinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arcsinh(x)
@sparse.elementwise_unary(linear=False)
def arctan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arctan(x)
def arctan2(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return jnp.arctan2(x1, x2)
@sparse.elementwise_unary(linear=False)
def arctanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arctanh(x)
def argmax(x, axis=None, keepdims=False):
    """Index of the maximum value along `axis`.

    For float inputs on CPU, works around JAX's flush-to-zero behavior so
    that -0.0 compares as strictly less than +0.0.
    """
    # NOTE(review): importing from the testing package at call time is
    # unusual for runtime code — presumably to avoid a circular import;
    # confirm before relying on it elsewhere.
    from keras.src.testing.test_case import uses_cpu
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "float" not in dtype or not uses_cpu() or x.ndim == 0:
        return jnp.argmax(x, axis=axis, keepdims=keepdims)
    # Fix the flush-to-zero (FTZ) issue based on this issue:
    # https://github.com/jax-ml/jax/issues/24280
    dtype = dtypes.result_type(dtype, "float32")
    x = cast(x, dtype)
    # Replace -0.0 with the smallest-magnitude negative float so it loses
    # ties against +0.0.
    is_negative_zero = (x == 0.0) & jnp.signbit(x)
    x = jnp.where(is_negative_zero, -jnp.finfo(x.dtype).tiny, x)
    return jnp.argmax(x, axis=axis, keepdims=keepdims)
def argmin(x, axis=None, keepdims=False):
    """Index of the minimum value along `axis` (same FTZ fix as `argmax`)."""
    from keras.src.testing.test_case import uses_cpu
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "float" not in dtype or not uses_cpu() or x.ndim == 0:
        return jnp.argmin(x, axis=axis, keepdims=keepdims)
    # Fix the flush-to-zero (FTZ) issue based on this issue:
    # https://github.com/jax-ml/jax/issues/24280
    dtype = dtypes.result_type(dtype, "float32")
    x = cast(x, dtype)
    is_negative_zero = (x == 0.0) & jnp.signbit(x)
    x = jnp.where(is_negative_zero, -jnp.finfo(x.dtype).tiny, x)
    return jnp.argmin(x, axis=axis, keepdims=keepdims)
def argsort(x, axis=-1):
    """Indices that would sort `x` along `axis`; scalars sort flattened."""
    x = convert_to_tensor(x)
    # A 0-d input has no axis -1; sort it as a flattened (1-element) array.
    if x.ndim == 0:
        return jnp.argsort(x, axis=None)
    return jnp.argsort(x, axis=axis)
def array(x, dtype=None):
    """Create a JAX array from `x`, optionally with an explicit dtype."""
    return jnp.array(x, dtype=dtype)
def view(x, dtype=None):
    """Reinterpret the bytes of `x` as `dtype` without copying semantics."""
    x = convert_to_tensor(x)
    return x.view(dtype=dtype)
def average(x, axis=None, weights=None):
    """Weighted average of `x` over `axis` via `jnp.average`.

    Both `x` and `weights` (when given) are promoted to a common float
    dtype before the reduction.
    """
    x = convert_to_tensor(x)
    if weights is None:
        common_dtype = dtypes.result_type(x.dtype, float)
        return jnp.average(cast(x, common_dtype), weights=None, axis=axis)
    weights = convert_to_tensor(weights)
    common_dtype = dtypes.result_type(x.dtype, weights.dtype, float)
    return jnp.average(
        cast(x, common_dtype),
        weights=cast(weights, common_dtype),
        axis=axis,
    )
def bitwise_and(x, y):
    """Element-wise bitwise AND."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    return jnp.bitwise_and(x, y)
def bitwise_invert(x):
    """Element-wise bitwise NOT (ones' complement)."""
    x = convert_to_tensor(x)
    return jnp.invert(x)
def bitwise_not(x):
    """Alias of `bitwise_invert`."""
    return bitwise_invert(x)
def bitwise_or(x, y):
    """Element-wise bitwise OR."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    return jnp.bitwise_or(x, y)
def bitwise_xor(x, y):
    """Element-wise bitwise XOR."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    return jnp.bitwise_xor(x, y)
def bitwise_left_shift(x, y):
    """Element-wise left shift of `x` by `y` bits."""
    x = convert_to_tensor(x)
    # A plain Python int shift amount is passed through as-is.
    if not isinstance(y, int):
        y = convert_to_tensor(y)
    return jnp.left_shift(x, y)
def left_shift(x, y):
    """Alias of `bitwise_left_shift`."""
    return bitwise_left_shift(x, y)
def bitwise_right_shift(x, y):
    """Element-wise right shift of `x` by `y` bits."""
    x = convert_to_tensor(x)
    if not isinstance(y, int):
        y = convert_to_tensor(y)
    return jnp.right_shift(x, y)
def right_shift(x, y):
    """Alias of `bitwise_right_shift`."""
    return bitwise_right_shift(x, y)
def blackman(x):
    """Blackman window of length `x`, cast to the configured floatx."""
    x = convert_to_tensor(x)
    return cast(jnp.blackman(x), config.floatx())
def broadcast_to(x, shape):
    """Broadcast `x` to the given `shape`."""
    x = convert_to_tensor(x)
    return jnp.broadcast_to(x, shape)
def cbrt(x):
    """Element-wise real cube root."""
    x = convert_to_tensor(x)
    return jnp.cbrt(x)
@sparse.elementwise_unary(linear=False)
def ceil(x):
    """Element-wise ceiling, returned in a float dtype."""
    x = convert_to_tensor(x)
    # int64 is special-cased to the configured floatx.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.ceil(x)
def clip(x, x_min, x_max):
    """Clamp `x` into `[x_min, x_max]`; bool inputs are widened to int32."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "bool":
        x = cast(x, "int32")
    return jnp.clip(x, x_min, x_max)
def concatenate(xs, axis=0):
    """Concatenate tensors along `axis`.

    If every input is a sparse BCOO, the result stays sparse; if the
    inputs are mixed sparse/dense, sparse inputs are densified first.
    """
    num_sparse = builtins.sum(
        1 for x in xs if isinstance(x, jax_sparse.BCOO)
    )
    if num_sparse == len(xs):
        # All-sparse: concatenate without densifying.
        dim = canonicalize_axis(axis, len(xs[0].shape))
        return jax_sparse.bcoo_concatenate(xs, dimension=dim)
    if num_sparse:
        # Mixed inputs: densify the sparse ones, convert the rest.
        converted = []
        for x in xs:
            if isinstance(x, jax_sparse.JAXSparse):
                converted.append(x.todense())
            else:
                converted.append(convert_to_tensor(x))
        xs = converted
    else:
        xs = [convert_to_tensor(x) for x in xs]
    return jnp.concatenate(xs, axis=axis)
@sparse.elementwise_unary(linear=True)
def conjugate(x):
    """Element-wise complex conjugate."""
    x = convert_to_tensor(x)
    return jnp.conjugate(x)
@sparse.elementwise_unary(linear=True)
def conj(x):
    """Alias of `conjugate`."""
    x = convert_to_tensor(x)
    return jnp.conjugate(x)
@sparse.elementwise_unary(linear=True)
def copy(x):
    """Return a copy of `x`."""
    x = convert_to_tensor(x)
    return jnp.copy(x)
@sparse.densifying_unary
def cos(x):
    """Element-wise cosine, computed in a float dtype."""
    x = convert_to_tensor(x)
    # int64 is special-cased to the configured floatx.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.cos(x)
@sparse.densifying_unary
def cosh(x):
    """Element-wise hyperbolic cosine, computed in a float dtype."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.cosh(x)
def count_nonzero(x, axis=None):
    """Number of non-zero elements along `axis`, as int32."""
    return cast(jnp.count_nonzero(x, axis=axis), "int32")
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Vector cross product of `x1` and `x2` along the given axes."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.cross(
        x1,
        x2,
        axisa=axisa,
        axisb=axisb,
        axisc=axisc,
        axis=axis,
    )
def cumprod(x, axis=None, dtype=None):
    """Cumulative product along `axis` (flattened when `axis` is None)."""
    x = convert_to_tensor(x)
    return jnp.cumprod(x, axis=axis, dtype=dtype)
def cumsum(x, axis=None, dtype=None):
    """Cumulative sum along `axis` (flattened when `axis` is None)."""
    x = convert_to_tensor(x)
    return jnp.cumsum(x, axis=axis, dtype=dtype)
def deg2rad(x):
    """Convert angles from degrees to radians."""
    x = convert_to_tensor(x)
    return jnp.deg2rad(x)
def diag(x, k=0):
    """Extract the k-th diagonal of a matrix, or build one from a vector."""
    x = convert_to_tensor(x)
    return jnp.diag(x, k=k)
def diagflat(x, k=0):
    """Build a 2-D array with the flattened `x` on its k-th diagonal."""
    x = convert_to_tensor(x)
    return jnp.diagflat(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
    """View of the diagonals of `x` over the (axis1, axis2) planes."""
    x = convert_to_tensor(x)
    return jnp.diagonal(
        x,
        offset=offset,
        axis1=axis1,
        axis2=axis2,
    )
def diff(a, n=1, axis=-1):
    """n-th discrete difference of `a` along `axis`."""
    a = convert_to_tensor(a)
    return jnp.diff(a, n=n, axis=axis)
# NOTE(review): decorated with `elementwise_unary` despite taking two
# arguments — presumably the wrapper only handles the first argument's
# sparsity; confirm against the `sparse` helper's implementation.
@sparse.elementwise_unary(linear=False)
def digitize(x, bins):
    """Indices of the bins to which each value in `x` belongs."""
    x = convert_to_tensor(x)
    bins = convert_to_tensor(bins)
    return jnp.digitize(x, bins)
def dot(x1, x2):
    """Dot product of `x1` and `x2` (matmul semantics for 2-D inputs)."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.dot(x1, x2)
def empty(shape, dtype=None):
    """Uninitialized array of `shape`; dtype defaults to floatx."""
    dtype = dtype or config.floatx()
    return jnp.empty(shape, dtype=dtype)
def empty_like(x, dtype=None):
    """Uninitialized array with the shape (and default dtype) of `x`."""
    return jnp.empty_like(x, dtype=dtype)
def equal(x1, x2):
    """Element-wise equality comparison."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.equal(x1, x2)
@sparse.densifying_unary
def exp(x):
    """Element-wise exponential; int/bool inputs are cast to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = cast(x, config.floatx())
    return jnp.exp(x)
@sparse.densifying_unary
def exp2(x):
    """Element-wise base-2 exponential; int/bool inputs cast to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = cast(x, config.floatx())
    return jnp.exp2(x)
def expand_dims(x, axis):
    """Insert a size-1 dimension at `axis`; sparse BCOO inputs stay sparse."""
    x = convert_to_tensor(x)
    if isinstance(x, jax_sparse.BCOO):
        # Sparse path: express the expansion as a broadcast_in_dim so the
        # BCOO layout is preserved.
        (
            _,
            result_shape,
            broadcast_dimensions,
        ) = sparse.axis_shape_dims_for_broadcast_in_dim(
            axis, x.shape, insert_dims=True
        )
        return jax_sparse.bcoo_broadcast_in_dim(
            x, shape=result_shape, broadcast_dimensions=broadcast_dimensions
        )
    return jnp.expand_dims(x, axis)
@sparse.elementwise_unary(linear=False)
def expm1(x):
    """Element-wise `exp(x) - 1`; int/bool inputs are cast to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = cast(x, config.floatx())
    return jnp.expm1(x)
def flip(x, axis=None):
    """Reverse the order of elements along `axis` (all axes when None)."""
    return jnp.flip(x, axis=axis)
@sparse.elementwise_unary(linear=False)
def floor(x):
    """Element-wise floor, returned in a float dtype."""
    x = convert_to_tensor(x)
    # int64 is special-cased to the configured floatx.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.floor(x)
def full(shape, fill_value, dtype=None):
    """Array of `shape` filled with `fill_value`; dtype defaults to floatx."""
    dtype = dtype or config.floatx()
    return jnp.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
    """Array shaped like `x`, filled with `fill_value`."""
    return jnp.full_like(x, fill_value, dtype=dtype)
def gcd(x1, x2):
    """Element-wise greatest common divisor."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.gcd(x1, x2)
def greater(x1, x2):
    """Element-wise `x1 > x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.greater(x1, x2)
def greater_equal(x1, x2):
    """Element-wise `x1 >= x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.greater_equal(x1, x2)
def hstack(xs):
    """Stack arrays horizontally (column-wise)."""
    return jnp.hstack(xs)
def identity(n, dtype=None):
    """n x n identity matrix; dtype defaults to floatx."""
    dtype = dtype or config.floatx()
    return jnp.identity(n, dtype=dtype)
@sparse.elementwise_unary(linear=True)
def imag(x):
    """Imaginary part of `x`."""
    x = convert_to_tensor(x)
    return jnp.imag(x)
def isclose(x1, x2, rtol=1e-5, atol=1e-8, equal_nan=False):
    """Element-wise approximate equality within `rtol`/`atol` tolerances."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.isclose(x1, x2, rtol, atol, equal_nan)
@sparse.densifying_unary
def isfinite(x):
    """Element-wise test for finiteness (not inf, not NaN)."""
    x = convert_to_tensor(x)
    return jnp.isfinite(x)
def isin(x1, x2, assume_unique=False, invert=False):
    """Element-wise test of whether each value of `x1` appears in `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.isin(x1, x2, assume_unique=assume_unique, invert=invert)
@sparse.elementwise_unary(linear=False)
def isinf(x):
    """Element-wise test for +/- infinity."""
    x = convert_to_tensor(x)
    return jnp.isinf(x)
@sparse.elementwise_unary(linear=False)
def isnan(x):
    """Element-wise test for NaN."""
    x = convert_to_tensor(x)
    return jnp.isnan(x)
def isneginf(x):
    """Element-wise test for negative infinity."""
    x = convert_to_tensor(x)
    return jnp.isneginf(x)
def isposinf(x):
    """Element-wise test for positive infinity."""
    x = convert_to_tensor(x)
    return jnp.isposinf(x)
def isreal(x):
    """Element-wise test for a zero imaginary part."""
    x = convert_to_tensor(x)
    return jnp.isreal(x)
def kron(x1, x2):
    """Kronecker product of `x1` and `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.kron(x1, x2)
def lcm(x1, x2):
    """Element-wise least common multiple."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.lcm(x1, x2)
def ldexp(x1, x2):
    """Compute `x1 * 2**x2` element-wise.

    Raises:
        TypeError: If the exponent `x2` is not an integer dtype.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    if standardize_dtype(x2.dtype) not in dtypes.INT_TYPES:
        raise TypeError(
            f"ldexp exponent must be an integer type. "
            f"Received: x2 dtype={x2.dtype}"
        )
    return jnp.ldexp(x1, x2)
def less(x1, x2):
    """Element-wise `x1 < x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.less(x1, x2)
def less_equal(x1, x2):
    """Element-wise `x1 <= x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.less_equal(x1, x2)
def linspace(
    start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
    """Evenly spaced numbers over `[start, stop]` (stop included if `endpoint`)."""
    return jnp.linspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        retstep=retstep,
        dtype=dtype,
        axis=axis,
    )
@sparse.densifying_unary
def log(x):
    """Element-wise natural logarithm; int64 inputs are cast to floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.log(x)
@sparse.densifying_unary
def log10(x):
    """Element-wise base-10 logarithm; int64 inputs are cast to floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.log10(x)
@sparse.elementwise_unary(linear=False)
def log1p(x):
    """Element-wise `log(1 + x)`; int64 inputs are cast to floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.log1p(x)
@sparse.densifying_unary
def log2(x):
    """Element-wise base-2 logarithm; int64 inputs are cast to floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.log2(x)
def logaddexp(x1, x2):
    """Element-wise `log(exp(x1) + exp(x2))`, computed stably."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    # Promote both operands to a common float dtype.
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = cast(x1, dtype)
    x2 = cast(x2, dtype)
    return jnp.logaddexp(x1, x2)
def logaddexp2(x1, x2):
    """Element-wise `log2(2**x1 + 2**x2)`, computed stably."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = cast(x1, dtype)
    x2 = cast(x2, dtype)
    return jnp.logaddexp2(x1, x2)
def logical_and(x1, x2):
    """Element-wise logical AND."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.logical_and(x1, x2)
def logical_not(x):
    """Element-wise logical NOT."""
    x = convert_to_tensor(x)
    return jnp.logical_not(x)
def logical_or(x1, x2):
    """Element-wise logical OR."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
    """Numbers spaced evenly on a log scale from `base**start` to `base**stop`."""
    return jnp.logspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        base=base,
        dtype=dtype,
        axis=axis,
    )
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
def maximum(x1, x2):
    """Element-wise maximum of `x1` and `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.maximum(x1, x2)
def median(x, axis=None, keepdims=False):
    """Median of `x` along `axis`; int64 inputs are computed in floatx."""
    # axis of jnp.median must be hashable
    if isinstance(axis, list):
        axis = tuple(axis)
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    result = jnp.median(x, axis=axis, keepdims=keepdims)
    # TODO: with jax < 0.4.26 jnp.median failed to keepdims when axis is None
    if keepdims is True and axis is None:
        # Re-insert the dropped dimensions so the output matches x.ndim.
        while result.ndim < x.ndim:
            result = jnp.expand_dims(result, axis=-1)
    return result
def meshgrid(*x, indexing="xy"):
    """Coordinate matrices from coordinate vectors."""
    return jnp.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
    """Minimum of `x` along `axis`, optionally seeded with `initial`."""
    x = convert_to_tensor(x)
    return jnp.min(x, axis=axis, keepdims=keepdims, initial=initial)
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
def minimum(x1, x2):
    """Element-wise minimum of `x1` and `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.minimum(x1, x2)
def mod(x1, x2):
    """Element-wise remainder of division."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.mod(x1, x2)
def moveaxis(x, source, destination):
    """Move axes of `x` from `source` positions to `destination` positions."""
    return jnp.moveaxis(x, source=source, destination=destination)
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
    """Replace NaN/inf values with finite substitutes."""
    x = convert_to_tensor(x)
    return jnp.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
def ndim(x):
    """Number of dimensions of `x`."""
    return jnp.ndim(x)
def nonzero(x):
    """Indices of the non-zero elements of `x`, one array per dimension."""
    return jnp.nonzero(x)
def not_equal(x1, x2):
    """Element-wise `x1 != x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.not_equal(x1, x2)
def ones_like(x, dtype=None):
    """Array of ones with the shape (and default dtype) of `x`."""
    return jnp.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None):
    """Array of zeros with the shape (and default dtype) of `x`."""
    return jnp.zeros_like(x, dtype=dtype)
def outer(x1, x2):
    """Outer product of two vectors (inputs are flattened)."""
    return jnp.outer(x1, x2)
def pad(x, pad_width, mode="constant", constant_values=None):
    """Pad `x` with `pad_width` entries per axis via `jnp.pad`.

    Raises:
        ValueError: If `constant_values` is given with a non-constant mode.
    """
    x = convert_to_tensor(x)
    if constant_values is None:
        return jnp.pad(x, pad_width, mode=mode)
    if mode != "constant":
        raise ValueError(
            "Argument `constant_values` can only be "
            "provided when `mode == 'constant'`. "
            f"Received: mode={mode}"
        )
    return jnp.pad(x, pad_width, mode=mode, constant_values=constant_values)
def prod(x, axis=None, keepdims=False, dtype=None):
    """Product of elements along `axis`."""
    x = convert_to_tensor(x)
    return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def quantile(x, q, axis=None, method="linear", keepdims=False):
    """q-th quantile(s) of `x` along `axis`; int64 inputs use floatx."""
    x = convert_to_tensor(x)
    q = convert_to_tensor(q)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    result = jnp.quantile(x, q, axis=axis, method=method, keepdims=keepdims)
    # TODO: with jax < 0.4.26 jnp.quantile failed to keepdims when axis is None
    if keepdims is True and axis is None:
        # A non-scalar q adds one leading dimension to the result.
        result_ndim = x.ndim + (1 if len(q.shape) > 0 else 0)
        while result.ndim < result_ndim:
            result = jnp.expand_dims(result, axis=-1)
    return result
def ravel(x):
    """Flatten `x` to 1-D."""
    x = convert_to_tensor(x)
    return jnp.ravel(x)
def unravel_index(indices, shape):
    """Convert flat indices into tuples of coordinate arrays for `shape`."""
    indices = convert_to_tensor(indices)
    return jnp.unravel_index(indices, shape)
@sparse.elementwise_unary(linear=True)
def real(x):
    """Real part of `x`."""
    x = convert_to_tensor(x)
    return jnp.real(x)
@sparse.densifying_unary
def reciprocal(x):
    """Element-wise `1 / x`."""
    x = convert_to_tensor(x)
    return jnp.reciprocal(x)
def repeat(x, repeats, axis=None):
    """Repeat elements of `x` along `axis`."""
    x = convert_to_tensor(x)
    return jnp.repeat(x, repeats, axis=axis)
def reshape(x, newshape):
    """Reshape `x` to `newshape`; sparse BCOO inputs stay sparse."""
    if isinstance(x, jax_sparse.BCOO):
        from keras.src.ops import operation_utils
        # Resolve the -1 in `new_shape` if applicable and possible
        output_shape = operation_utils.compute_reshape_output_shape(
            x.shape, newshape, "new_shape"
        )
        if None not in output_shape:
            newshape = output_shape
        return jax_sparse.bcoo_reshape(x, new_sizes=newshape)
    x = convert_to_tensor(x)
    return jnp.reshape(x, newshape)
def roll(x, shift, axis=None):
    """Circularly shift elements of `x` by `shift` along `axis`."""
    return jnp.roll(x, shift, axis=axis)
def searchsorted(sorted_sequence, values, side="left"):
    """Insertion indices that keep a 1-D `sorted_sequence` sorted.

    Raises:
        ValueError: If `sorted_sequence` is not 1-D.
    """
    if ndim(sorted_sequence) != 1:
        raise ValueError(
            "`searchsorted` only supports 1-D sorted sequences. "
            "You can use `keras.ops.vectorized_map` "
            "to extend it to N-D sequences. Received: "
            f"sorted_sequence.shape={sorted_sequence.shape}"
        )
    return jnp.searchsorted(sorted_sequence, values, side=side)
@sparse.elementwise_unary(linear=False)
def sign(x):
    """Element-wise sign (-1, 0, or 1)."""
    x = convert_to_tensor(x)
    return jnp.sign(x)
@sparse.elementwise_unary(linear=False)
def signbit(x):
    """Element-wise test of whether the sign bit is set (negative)."""
    x = convert_to_tensor(x)
    return jnp.signbit(x)
@sparse.elementwise_unary(linear=False)
def sin(x):
    """Element-wise sine, computed in a float dtype."""
    x = convert_to_tensor(x)
    # int64 is special-cased to the configured floatx.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.sin(x)
@sparse.elementwise_unary(linear=False)
def sinh(x):
    """Element-wise hyperbolic sine, computed in a float dtype."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.sinh(x)
def size(x):
    """Total number of elements in `x`."""
    return jnp.size(x)
def sort(x, axis=-1):
    """Sorted copy of `x` along `axis`."""
    x = convert_to_tensor(x)
    return jnp.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
    """Split `x` into equal sections or at explicit indices along `axis`."""
    x = convert_to_tensor(x)
    return jnp.split(x, indices_or_sections, axis=axis)
def array_split(x, indices_or_sections, axis=0):
    """Like `split`, but allows sections that do not divide evenly."""
    x = convert_to_tensor(x)
    return jnp.array_split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
    """Stack a sequence of tensors along a new `axis`."""
    x = [convert_to_tensor(t) for t in x]
    return jnp.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
    """Standard deviation along `axis`; int64 inputs are cast to floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
    """Interchange two axes of `x`."""
    x = convert_to_tensor(x)
    return jnp.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
    """Gather elements of `x` at `indices` along `axis`."""
    x = convert_to_tensor(x)
    # Indices must be dense even if x is sparse.
    indices = convert_to_tensor(indices, sparse=False)
    return jnp.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
    """Gather values from `x` by matching `indices` along `axis`."""
    x = convert_to_tensor(x)
    indices = convert_to_tensor(indices, sparse=False)
    return jnp.take_along_axis(x, indices, axis=axis)
@sparse.elementwise_unary(linear=False)
def tan(x):
    """Element-wise tangent, computed in a float dtype."""
    x = convert_to_tensor(x)
    # int64 is special-cased to the configured floatx.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.tan(x)
@sparse.elementwise_unary(linear=False)
def tanh(x):
    """Element-wise hyperbolic tangent, computed in a float dtype."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.tanh(x)
def tensordot(x1, x2, axes=2):
    """Tensor contraction of `x1` and `x2` over the given `axes`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return jnp.tensordot(x1, x2, axes=axes)
@sparse.elementwise_unary(linear=False)
def round(x, decimals=0):
x = convert_to_tensor(x)
# jnp.round doesn't support decimals < 0 for integers
x_dtype = standardize_dtype(x.dtype)
if "int" in x_dtype and decimals < 0:
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/trainer.py | keras/src/backend/jax/trainer.py | import collections
import itertools
import warnings
from functools import partial
import jax
import numpy as np
from keras.src import backend
from keras.src import callbacks as callbacks_module
from keras.src import optimizers as optimizers_module
from keras.src import tree
from keras.src.backend import config
from keras.src.backend import distribution_lib as jax_distribution_lib
from keras.src.backend.config import is_nnx_enabled
from keras.src.distribution import distribution_lib
from keras.src.trainers import trainer as base_trainer
from keras.src.trainers.data_adapters import array_slicing
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.epoch_iterator import EpochIterator
from keras.src.utils import traceback_utils
# Choose the jit wrapper once at import time: Flax NNX's `nnx.jit` when NNX
# support is enabled, plain `jax.jit` otherwise.
if is_nnx_enabled():
    from flax import nnx
    jit = nnx.jit
else:
    jit = jax.jit
class JAXTrainer(base_trainer.Trainer):
    def __init__(self):
        super().__init__()
        # Per-mode step functions, built lazily by make_*_function().
        self.train_function = None
        self.test_function = None
        self.predict_function = None
        # True when the model's variables hold the current values; set to
        # False while the authoritative state is the pytree threaded
        # through the (jitted) step functions.
        self._jax_state_synced = True
    def compute_loss_and_updates(
        self,
        trainable_variables,
        non_trainable_variables,
        metrics_variables,
        x,
        y,
        sample_weight,
        training=False,
        optimizer_variables=None,
    ):
        """This method is stateless and is intended for use with jax.grad.

        Runs the forward pass and loss computation with explicitly passed
        variable values. Returns `(loss, aux)` where `aux` is
        `(unscaled_loss, y_pred, non_trainable_variables,
        metrics_variables)`; `loss` may be scaled by the optimizer during
        training.
        """
        kwargs = {}
        if self._call_has_training_arg:
            kwargs["training"] = training
        # Run stateless forward pass
        y_pred, non_trainable_variables, losses = self.stateless_call(
            trainable_variables,
            non_trainable_variables,
            x,
            return_losses=True,
            **kwargs,
        )
        if losses:
            # Make forward pass losses available to compute_loss.
            self._losses_override.clear()
            self._losses_override = losses
        loss, variables = self.stateless_compute_loss(
            trainable_variables,
            non_trainable_variables,
            metrics_variables,
            x=x,
            y=y,
            y_pred=y_pred,
            sample_weight=sample_weight,
            training=training,
        )
        if losses:
            # Clear the override so later calls don't see stale losses.
            self._losses_override.clear()
        (trainable_variables, non_trainable_variables, metrics_variables) = (
            variables
        )
        # Handle loss scaling
        unscaled_loss = loss
        if training and self.optimizer is not None:
            # Scale loss with a StatelessScope, to use an update scale variable.
            mapping = list(zip(self.optimizer.variables, optimizer_variables))
            with backend.StatelessScope(state_mapping=mapping):
                loss = self.optimizer.scale_loss(loss)
        return loss, (
            unscaled_loss,
            y_pred,
            non_trainable_variables,
            metrics_variables,
        )
    def _update_metrics_variables(
        self, metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
    ):
        """Statelessly update metrics and return `(logs, new_variables)`.

        Metric updates run inside a StatelessScope mapped onto the passed
        `metrics_variables`, so the model's real variables are untouched.
        """
        with backend.StatelessScope(
            state_mapping=[
                (ref_v, v)
                for ref_v, v in zip(self.metrics_variables, metrics_variables)
            ]
        ) as scope:
            self._loss_tracker.update_state(
                unscaled_loss,
                # Weight the loss by batch size, taken from the first
                # non-None input's leading dimension.
                sample_weight=next(
                    i for i in tree.flatten(x) if i is not None
                ).shape[0],
            )
            logs = self.compute_metrics(x, y, y_pred, sample_weight)
        # Collect updated values; variables not touched by the scope keep
        # their previous value.
        new_metrics_variables = []
        for ref_v in self.metrics_variables:
            new_v = scope.get_current_value(ref_v)
            if new_v is None:
                new_v = ref_v.value
            new_metrics_variables.append(new_v)
        return logs, new_metrics_variables
    def train_step(self, state, data):
        """One stateless optimization step.

        `state` is `(trainable, non_trainable, optimizer, metrics)`
        variable values; returns `(logs, new_state)` with the same
        structure so the step can be jitted with donated buffers.
        """
        (
            trainable_variables,
            non_trainable_variables,
            optimizer_variables,
            metrics_variables,
        ) = state
        x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
        # Differentiate w.r.t. the first argument (trainable variables).
        grad_fn = jax.value_and_grad(
            self.compute_loss_and_updates, has_aux=True
        )
        (loss, aux), grads = grad_fn(
            trainable_variables,
            non_trainable_variables,
            metrics_variables,
            x,
            y,
            sample_weight,
            training=True,
            optimizer_variables=optimizer_variables,
        )
        (unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = (
            aux
        )
        (
            trainable_variables,
            optimizer_variables,
        ) = self.optimizer.stateless_apply(
            optimizer_variables, grads, trainable_variables
        )
        logs, metrics_variables = self._update_metrics_variables(
            metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
        )
        state = (
            trainable_variables,
            non_trainable_variables,
            optimizer_variables,
            metrics_variables,
        )
        return logs, state
    def test_step(self, state, data):
        """One stateless evaluation step.

        `state` is `(trainable, non_trainable, metrics)` variable values;
        returns `(logs, new_state)` with the same structure.
        """
        (
            trainable_variables,
            non_trainable_variables,
            metrics_variables,
        ) = state
        x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
        loss, aux = self.compute_loss_and_updates(
            trainable_variables,
            non_trainable_variables,
            metrics_variables,
            x,
            y,
            sample_weight,
            training=False,
        )
        (unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = (
            aux
        )
        logs, metrics_variables = self._update_metrics_variables(
            metrics_variables, unscaled_loss, x, y, y_pred, sample_weight
        )
        state = (
            trainable_variables,
            non_trainable_variables,
            metrics_variables,
        )
        return logs, state
    def predict_step(self, state, data):
        """Stateless forward pass for inference.

        `state` is `(trainable_variables, non_trainable_variables)`;
        returns `(outputs, non_trainable_variables)`.
        """
        trainable_variables, non_trainable_variables = state
        kwargs = {}
        if self._call_has_training_arg:
            kwargs["training"] = False
        x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
        outputs, non_trainable_variables = self.stateless_call(
            trainable_variables, non_trainable_variables, x, **kwargs
        )
        return outputs, non_trainable_variables
    def _make_function(self, step_function, concatenate_outputs=False):
        """Wrap `step_function` into an iterator-driven function.

        The returned callable `(state, iterator) -> (outputs, state)`
        pulls up to `self.steps_per_execution` batches from `iterator`
        per call. When `concatenate_outputs` is True, the per-batch
        outputs are concatenated (used for predict); otherwise only the
        last batch's outputs are returned.
        """
        if self.steps_per_execution > 1:
            if concatenate_outputs:
                def concatenate(outputs):
                    # Fold the per-step output structures together along
                    # the batch dimension.
                    output = outputs[0]
                    for next_output in outputs[1:]:
                        output = tree.map_structure(
                            lambda t1, t2: jax.numpy.concatenate([t1, t2]),
                            output,
                            next_output,
                        )
                    return output
                if not self.run_eagerly and self.jit_compile:
                    concatenate = jit(concatenate)
                def iterator_step(state, iterator):
                    data = next(iterator)
                    outputs, state = step_function(state, data)
                    outputs = [outputs]
                    # Run the remaining steps; a short final execution is
                    # fine, StopIteration just ends the chunk early.
                    try:
                        for _ in range(self.steps_per_execution - 1):
                            data = next(iterator)
                            _outputs, state = step_function(state, data)
                            outputs.append(_outputs)
                    except StopIteration:
                        pass
                    outputs = concatenate(outputs)
                    return outputs, state
            else:
                def iterator_step(state, iterator):
                    data = next(iterator)
                    outputs, state = step_function(state, data)
                    try:
                        for _ in range(self.steps_per_execution - 1):
                            data = next(iterator)
                            outputs, state = step_function(state, data)
                    except StopIteration:
                        pass
                    return outputs, state
        else:
            def iterator_step(state, iterator):
                return step_function(state, next(iterator))
        return iterator_step
    def make_train_function(self, force=False):
        """Build and cache `self.train_function` (jitted unless eager)."""
        if self.train_function is not None and not force:
            return
        if not self.run_eagerly and self.jit_compile:
            out_shardings = None
            if distribution_lib.distribution() is not None:
                # Logs get no sharding constraint; the state keeps the
                # model's sharding spec.
                state_shardings = self._get_state_sharding_spec()
                out_shardings = (None, state_shardings)
            if is_nnx_enabled():
                # NOTE(review): calling through the class keeps `self` an
                # explicit argument — presumably required by nnx.jit's
                # tracing; confirm against flax.nnx documentation.
                step_fn = lambda state, data: type(self).train_step(
                    self, state, data
                )
            else:
                step_fn = self.train_step
            train_step = jit(
                step_fn,
                # Donate the input state buffers to avoid copies.
                donate_argnums=0,
                out_shardings=out_shardings,
            )
        else:
            train_step = self.train_step
        step_function = self._make_function(train_step)
        self.train_function = step_function
    def make_test_function(self, force=False):
        """Build and cache `self.test_function` (jitted unless eager)."""
        if self.test_function is not None and not force:
            return
        if not self.run_eagerly and self.jit_compile:
            out_shardings = None
            if distribution_lib.distribution() is not None:
                # The test state excludes optimizer variables, so drop
                # that entry from the sharding spec.
                (
                    trainable_shardings,
                    non_trainable_shardings,
                    _,  # optimizer_shardings
                    metrics_shardings,
                ) = self._get_state_sharding_spec()
                state_shardings = (
                    trainable_shardings,
                    non_trainable_shardings,
                    metrics_shardings,
                )
                out_shardings = (None, state_shardings)
            if is_nnx_enabled():
                step_fn = lambda state, data: type(self).test_step(
                    self, state, data
                )
            else:
                step_fn = self.test_step
            test_step = jit(
                step_fn,
                donate_argnums=0,
                out_shardings=out_shardings,
            )
        else:
            test_step = self.test_step
        step_function = self._make_function(test_step)
        self.test_function = step_function
    def make_predict_function(self, force=False):
        """Build and cache `self.predict_function` (jitted unless eager)."""
        # NOTE(review): the early-exit returns the cached function while
        # the build path below returns None — callers appear to read
        # `self.predict_function` rather than the return value; confirm.
        if self.predict_function is not None and not force:
            return self.predict_function
        def predict_step(state, data):
            # Thread the (unchanged) trainable variables back through the
            # state so the signature matches the other step functions.
            outputs, non_trainable_variables = self.predict_step(state, data)
            return outputs, (state[0], non_trainable_variables)
        if not self.run_eagerly and self.jit_compile:
            out_shardings = None
            if distribution_lib.distribution() is not None:
                # Predict state excludes optimizer and metrics variables.
                (
                    trainable_shardings,
                    non_trainable_shardings,
                    _,  # optimizer_shardings
                    _,  # metrics_shardings
                ) = self._get_state_sharding_spec()
                state_shardings = (
                    trainable_shardings,
                    non_trainable_shardings,
                )
                out_shardings = (None, state_shardings)
            predict_step = jit(
                predict_step,
                donate_argnums=0,
                out_shardings=out_shardings,
            )
        _step_function = self._make_function(
            predict_step, concatenate_outputs=True
        )
        def step_function(state, iterator):
            outputs, state = _step_function(state, iterator)
            return outputs, state
        self.predict_function = step_function
    @traceback_utils.filter_traceback
    def fit(
        self,
        x=None,
        y=None,
        batch_size=None,
        epochs=1,
        verbose="auto",
        callbacks=None,
        validation_split=0.0,
        validation_data=None,
        shuffle=True,
        class_weight=None,
        sample_weight=None,
        initial_epoch=0,
        steps_per_epoch=None,
        validation_steps=None,
        validation_batch_size=None,
        validation_freq=1,
    ):
        """JAX implementation of `Model.fit`.

        Threads the model state through jitted train steps; the model's
        variables are only re-synced from the JAX state between epochs and
        at exit (see `jax_state_sync` calls below).
        """
        self._assert_compile_called("fit")
        # Possibly cap epochs for debugging runs.
        max_epochs = config.max_epochs()
        if max_epochs and max_epochs < epochs:
            warnings.warn("Limiting epochs to %d" % max_epochs)
            epochs = max_epochs
        # TODO: respect compiled trainable state
        self._eval_epoch_iterator = None
        if validation_split and validation_data is None:
            # Create the validation data using the training data. Only supported
            # for TF/numpy/jax arrays.
            (
                (x, y, sample_weight),
                validation_data,
            ) = array_slicing.train_validation_split(
                (x, y, sample_weight), validation_split=validation_split
            )
        if validation_data is not None:
            (
                val_x,
                val_y,
                val_sample_weight,
            ) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
        # Create an iterator that yields batches for one epoch.
        epoch_iterator = JAXEpochIterator(
            x=x,
            y=y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            steps_per_epoch=steps_per_epoch,
            shuffle=shuffle,
            class_weight=class_weight,
            steps_per_execution=self.steps_per_execution,
        )
        self._symbolic_build(iterator=epoch_iterator)
        epoch_iterator.reset()
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_history=True,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=epochs,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        self.make_train_function()
        self.stop_training = False
        training_logs = {}
        training_finished = False
        callbacks.on_train_begin()
        initial_epoch = self._initial_epoch or initial_epoch
        try:
            for epoch in range(initial_epoch, epochs):
                self.reset_metrics()
                callbacks.on_epoch_begin(epoch)
                self._jax_state_synced = True
                with epoch_iterator.catch_stop_iteration():
                    for begin_step, end_step, iterator in epoch_iterator:
                        # Callbacks
                        callbacks.on_train_batch_begin(begin_step)
                        # Train step
                        if self._jax_state_synced:
                            # The state may have been synced by a callback.
                            state = self._get_jax_state(
                                trainable_variables=True,
                                non_trainable_variables=True,
                                optimizer_variables=True,
                                metrics_variables=True,
                                purge_model_variables=True,
                            )
                            self._jax_state_synced = False
                        logs, state = self.train_function(state, iterator)
                        (
                            trainable_variables,
                            non_trainable_variables,
                            optimizer_variables,
                            metrics_variables,
                        ) = state
                        # Setting _jax_state enables callbacks to force a state
                        # sync if they need to.
                        self._jax_state = {
                            "trainable_variables": trainable_variables,
                            "non_trainable_variables": non_trainable_variables,
                            "optimizer_variables": optimizer_variables,
                            "metrics_variables": metrics_variables,
                        }
                        # Dispatch callbacks. This takes care of async dispatch.
                        callbacks.on_train_batch_end(end_step, logs)
                        if self.stop_training:
                            # Stop training if a callback has set
                            # this flag in on_(train_)batch_end.
                            break
                # Reattach state to the model
                # (if not already done by a callback).
                # NOTE: doing this after each step would be a big performance
                # bottleneck.
                self.jax_state_sync()
                # Override with model metrics instead of last step logs if
                # needed.
                epoch_logs = dict(self._get_metrics_result_or_logs(logs))
                # Run validation.
                if validation_data is not None and self._should_eval(
                    epoch, validation_freq
                ):
                    # Create JAXEpochIterator for evaluation and cache it.
                    if getattr(self, "_eval_epoch_iterator", None) is None:
                        self._eval_epoch_iterator = JAXEpochIterator(
                            x=val_x,
                            y=val_y,
                            sample_weight=val_sample_weight,
                            batch_size=validation_batch_size or batch_size,
                            steps_per_execution=self.steps_per_execution,
                            steps_per_epoch=validation_steps,
                            shuffle=False,
                        )
                    val_logs = self.evaluate(
                        x=val_x,
                        y=val_y,
                        sample_weight=val_sample_weight,
                        batch_size=validation_batch_size or batch_size,
                        steps=validation_steps,
                        callbacks=callbacks,
                        return_dict=True,
                        _use_cached_eval_dataset=True,
                    )
                    val_logs = {
                        f"val_{name}": val for name, val in val_logs.items()
                    }
                    epoch_logs.update(val_logs)
                callbacks.on_epoch_end(epoch, epoch_logs)
                training_logs = epoch_logs
                if self.stop_training:
                    break
            training_finished = True
        finally:
            self.jax_state_sync()
            if (
                isinstance(self.optimizer, optimizers_module.Optimizer)
                and epochs > 0
            ):
                self.optimizer.finalize_variable_values(self.trainable_weights)
            # If _eval_epoch_iterator exists, delete it after all epochs
            # are done.
            if getattr(self, "_eval_epoch_iterator", None) is not None:
                del self._eval_epoch_iterator
            if training_finished:
                callbacks.on_train_end(logs=training_logs)
            self._jax_state = None
        return self.history
    @traceback_utils.filter_traceback
    def evaluate(
        self,
        x=None,
        y=None,
        batch_size=None,
        verbose="auto",
        sample_weight=None,
        steps=None,
        callbacks=None,
        return_dict=False,
        **kwargs,
    ):
        """JAX implementation of `Model.evaluate`.

        Threads the model state through jitted test steps and re-syncs
        the model's variables once at the end.
        """
        self._assert_compile_called("evaluate")
        # TODO: respect compiled trainable state
        use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
        if kwargs:
            raise ValueError(f"Arguments not recognized: {kwargs}")
        if use_cached_eval_dataset:
            # Reuse the iterator built by fit() for validation runs.
            epoch_iterator = self._eval_epoch_iterator
        else:
            # Create an iterator that yields batches of
            # input/target data.
            epoch_iterator = JAXEpochIterator(
                x=x,
                y=y,
                sample_weight=sample_weight,
                batch_size=batch_size,
                steps_per_epoch=steps,
                shuffle=False,
                steps_per_execution=self.steps_per_execution,
            )
        self._symbolic_build(iterator=epoch_iterator)
        epoch_iterator.reset()
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        self.make_test_function()
        self.stop_evaluating = False
        callbacks.on_test_begin()
        logs = {}
        self.reset_metrics()
        self._jax_state_synced = True
        with epoch_iterator.catch_stop_iteration():
            for begin_step, end_step, iterator in epoch_iterator:
                callbacks.on_test_batch_begin(begin_step)
                if self._jax_state_synced:
                    # The state may have been synced by a callback.
                    state = self._get_jax_state(
                        trainable_variables=True,
                        non_trainable_variables=True,
                        metrics_variables=True,
                        purge_model_variables=True,
                    )
                    self._jax_state_synced = False
                logs, state = self.test_function(state, iterator)
                (
                    trainable_variables,
                    non_trainable_variables,
                    metrics_variables,
                ) = state
                # Setting _jax_state enables callbacks to force a state sync
                # if they need to.
                self._jax_state = {
                    # I wouldn't recommend modifying non-trainable model state
                    # during evaluate(), but it's allowed.
                    "trainable_variables": trainable_variables,
                    "non_trainable_variables": non_trainable_variables,
                    "metrics_variables": metrics_variables,
                }
                # Dispatch callbacks. This takes care of async dispatch.
                callbacks.on_test_batch_end(end_step, logs)
                if self.stop_evaluating:
                    break
        # Reattach state back to model (if not already done by a callback).
        self.jax_state_sync()
        logs = self._get_metrics_result_or_logs(logs)
        callbacks.on_test_end(logs)
        self._jax_state = None
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    @traceback_utils.filter_traceback
    def predict(
        self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
    ):
        """Generate output predictions for the input samples.

        Iterates over `x` in batches, runs `predict_function` on each
        batch, collects the per-batch outputs, and concatenates them with
        `np.concatenate` along the batch axis.

        Args:
            x: Input data (any structure accepted by `JAXEpochIterator`).
            batch_size: Number of samples per batch.
            verbose: Verbosity mode; any non-zero value adds a progress bar.
            steps: Total number of batches to draw before stopping.
            callbacks: List of callbacks (or a `CallbackList`).

        Returns:
            NumPy array(s) of predictions, matching the structure of the
            per-batch outputs.
        """
        # Create an iterator that yields batches of input data.
        epoch_iterator = JAXEpochIterator(
            x=x,
            batch_size=batch_size,
            steps_per_epoch=steps,
            shuffle=False,
            steps_per_execution=self.steps_per_execution,
        )
        if not all(layer.built for layer in self._flatten_layers()):
            # Build the model on one batch of data.
            for _, _, iterator in epoch_iterator:
                # Build model
                x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(
                    next(iterator)
                )
                if is_nnx_enabled():
                    self(x)
                else:
                    # Stateless scope: the build trace must not mutate
                    # variables.
                    with backend.StatelessScope():
                        self(x)
                break
            epoch_iterator.reset()
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        self.make_predict_function()
        self.stop_predicting = False
        callbacks.on_predict_begin()

        # Accumulates per-batch outputs into lists, preserving the nested
        # structure of `batch_outputs`.
        def append_to_outputs(batch_outputs, outputs):
            if outputs is None:
                outputs = tree.map_structure(
                    lambda batch_output: [batch_output],
                    batch_outputs,
                )
            else:
                tree.map_structure_up_to(
                    batch_outputs,
                    lambda output, batch_output: output.append(batch_output),
                    outputs,
                    batch_outputs,
                )
            return outputs

        self._jax_state_synced = True
        outputs = None
        non_trainable_variables = None
        with epoch_iterator.catch_stop_iteration():
            for begin_step, end_step, iterator in epoch_iterator:
                callbacks.on_predict_batch_begin(begin_step)
                if self._jax_state_synced:
                    # The state may have been synced by a callback.
                    state = self._get_jax_state(
                        trainable_variables=True,
                        non_trainable_variables=True,
                        purge_model_variables=True,
                    )
                    self._jax_state_synced = False
                batch_outputs, state = self.predict_function(state, iterator)
                # `predict_function` returns state in this fixed order.
                (
                    trainable_variables,
                    non_trainable_variables,
                ) = state
                self._jax_state = {
                    "trainable_variables": trainable_variables,
                    # I wouldn't recommend modifying non-trainable model state
                    # during predict(), but it's allowed.
                    "non_trainable_variables": non_trainable_variables,
                }
                outputs = append_to_outputs(batch_outputs, outputs)
                # Dispatch callbacks. This takes care of async dispatch.
                callbacks.on_predict_batch_end(
                    end_step, {"outputs": batch_outputs}
                )
                if self.stop_predicting:
                    break
        self.jax_state_sync()
        callbacks.on_predict_end()
        self._jax_state = None
        # NOTE(review): if the iterator yields zero batches, `batch_outputs`
        # is never bound and this line raises UnboundLocalError — confirm
        # whether empty input is expected to be rejected upstream.
        return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
    def train_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        class_weight=None,
        return_dict=False,
    ):
        """Run a single gradient update on one batch of data.

        Args:
            x: Input data.
            y: Target data.
            sample_weight: Optional per-sample weights. Mutually exclusive
                with `class_weight`.
            class_weight: Optional class-weight mapping; converted into
                `sample_weight` via
                `data_adapter_utils.class_weight_to_sample_weights`.
            return_dict: If `True`, return the metric logs as a dict;
                otherwise return them flattened in metrics order.

        Returns:
            The metric logs (as NumPy values), as a dict or flat list
            depending on `return_dict`.

        Raises:
            ValueError: If both `sample_weight` and `class_weight` are given.
        """
        self._assert_compile_called("train_on_batch")
        if class_weight is not None:
            if sample_weight is not None:
                raise ValueError(
                    "Arguments `sample_weight` and `class_weight` "
                    "cannot be specified at the same time. "
                    f"Received: sample_weight={sample_weight}, "
                    f"class_weight={class_weight}"
                )
            sample_weight = data_adapter_utils.class_weight_to_sample_weights(
                y, class_weight
            )

        # Single-item generator: `train_function` consumes an iterator.
        def data():
            yield _distribute_data((x, y, sample_weight))

        # Maybe build model
        self._symbolic_build(data_batch=next(data()))
        self.make_train_function()
        # Train step
        # Note: variables are not purged from the model here (unlike the
        # batch loops in evaluate(), which pass purge_model_variables=True).
        state = self._get_jax_state(
            trainable_variables=True,
            non_trainable_variables=True,
            optimizer_variables=True,
            metrics_variables=True,
            purge_model_variables=False,
        )
        self._jax_state_synced = False
        logs, state = self.train_function(state, data())
        # State sync: unpack the updated state (fixed order) and write it
        # back onto the model variables.
        (
            trainable_variables,
            non_trainable_variables,
            optimizer_variables,
            metrics_variables,
        ) = state
        self._jax_state = {
            "trainable_variables": trainable_variables,
            "non_trainable_variables": non_trainable_variables,
            "optimizer_variables": optimizer_variables,
            "metrics_variables": metrics_variables,
        }
        self.jax_state_sync()
        # Format return values: convert JAX arrays in the logs to NumPy.
        logs = tree.map_structure(lambda x: np.array(x), logs)
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    def test_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        return_dict=False,
    ):
        """Evaluate the model on a single batch of data.

        Args:
            x: Input data.
            y: Target data.
            sample_weight: Optional per-sample weights.
            return_dict: If `True`, return the metric logs as a dict;
                otherwise return them flattened in metrics order.

        Returns:
            The metric logs (as NumPy values), as a dict or flat list
            depending on `return_dict`.
        """
        self._assert_compile_called("test_on_batch")

        # Single-item generator: `test_function` consumes an iterator.
        def data():
            yield _distribute_data((x, y, sample_weight))

        # Maybe build model
        self._symbolic_build(data_batch=next(data()))
        self.make_test_function()
        # Test step
        state = self._get_jax_state(
            trainable_variables=True,
            non_trainable_variables=True,
            metrics_variables=True,
            purge_model_variables=False,
        )
        self._jax_state_synced = False
        logs, state = self.test_function(state, data())
        # State sync: write the updated state back onto the model variables.
        trainable_variables, non_trainable_variables, metrics_variables = state
        self._jax_state = {
            "trainable_variables": trainable_variables,
            "non_trainable_variables": non_trainable_variables,
            "metrics_variables": metrics_variables,
        }
        self.jax_state_sync()
        # Format return values.
        logs = tree.map_structure(lambda x: np.array(x), logs)
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    def predict_on_batch(self, x):
        """Return model predictions for a single batch `x`.

        Builds the model on `x` first if any layer is unbuilt, then runs
        `predict_function` once and converts the outputs to NumPy.
        """
        if not all(layer.built for layer in self._flatten_layers()):
            # Build model
            with backend.StatelessScope():
                self(x)
        self.make_predict_function()
        state = self._get_jax_state(
            trainable_variables=True,
            non_trainable_variables=True,
            metrics_variables=False,
            purge_model_variables=False,
        )
        self._jax_state_synced = False

        # Single-item generator: `predict_function` consumes an iterator.
        def data():
            yield (x,)

        batch_outputs, state = self.predict_function(state, data())
        # Write the (possibly updated) state back onto the model variables.
        trainable_variables, non_trainable_variables = state
        self._jax_state = {
            "trainable_variables": trainable_variables,
            "non_trainable_variables": non_trainable_variables,
        }
        self.jax_state_sync()
        batch_outputs = tree.map_structure(lambda x: np.array(x), batch_outputs)
        return batch_outputs
def jax_state_sync(self):
if not getattr(self, "_jax_state", None) or self._jax_state_synced:
return
trainable_variables = self._jax_state.get("trainable_variables", None)
non_trainable_variables = self._jax_state.get(
"non_trainable_variables", None
)
optimizer_variables = self._jax_state.get("optimizer_variables", None)
metrics_variables = self._jax_state.get("metrics_variables", None)
if trainable_variables:
for ref_v, v in zip(self.trainable_variables, trainable_variables):
ref_v.assign(v)
if non_trainable_variables:
for ref_v, v in zip(
self.non_trainable_variables, non_trainable_variables
):
ref_v.assign(v)
if optimizer_variables:
for ref_v, v in zip(self.optimizer.variables, optimizer_variables):
ref_v.assign(v)
if metrics_variables:
for ref_v, v in zip(self.metrics_variables, metrics_variables):
ref_v.assign(v)
self._jax_state_synced = True
def _get_state_sharding_spec(self):
trainable_shardings = [
v.value.sharding for v in self.trainable_variables
]
non_trainable_shardings = [
v.value.sharding for v in self.non_trainable_variables
]
if hasattr(self, "optimizer") and self.optimizer is not None:
optimizer_shardings = [
v.value.sharding for v in self.optimizer.variables
]
else:
optimizer_shardings = []
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/random.py | keras/src/backend/jax/random.py | import jax
from keras.src.backend.config import floatx
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
def jax_draw_seed(seed):
    """Return a JAX PRNG key for `seed`.

    A `jax.Array` is assumed to already be a PRNG key and is returned
    unchanged; any other value is converted through Keras' `draw_seed`.
    """
    if not isinstance(seed, jax.Array):
        return draw_seed(seed)
    return seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Draw samples from a normal distribution with `mean` and `stddev`."""
    key = jax_draw_seed(seed)
    # Draw standard-normal samples, then shift/scale to the requested
    # distribution.
    standard = jax.random.normal(key, shape=shape, dtype=dtype or floatx())
    return mean + stddev * standard
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Draw samples uniformly from `[minval, maxval)`."""
    key = jax_draw_seed(seed)
    return jax.random.uniform(
        key,
        shape=shape,
        dtype=dtype or floatx(),
        minval=minval,
        maxval=maxval,
    )
def categorical(logits, num_samples, dtype="int32", seed=None):
    """Sample `num_samples` class indices per row of `logits`."""
    key = jax_draw_seed(seed)
    # Replace the class axis (axis 1) with the number of requested samples.
    target_shape = list(logits.shape)
    target_shape[1] = num_samples
    samples = jax.random.categorical(
        key, logits[..., None], shape=tuple(target_shape), axis=1
    )
    return samples.astype(dtype)
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Draw integer samples uniformly from `[minval, maxval)`."""
    key = jax_draw_seed(seed)
    return jax.random.randint(
        key, shape=shape, minval=minval, maxval=maxval, dtype=dtype
    )
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Draw normal samples truncated to two standard deviations."""
    key = jax_draw_seed(seed)
    # Sample from a standard normal restricted to [-2, 2], then shift/scale
    # to the requested mean and stddev.
    standard = jax.random.truncated_normal(
        key, shape=shape, lower=-2.0, upper=2.0, dtype=dtype or floatx()
    )
    return mean + stddev * standard
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Zero entries of `inputs` with probability `rate`.

    Kept entries are scaled by `1 / (1 - rate)` so the expected value of
    the output matches the input.
    """
    key = jax_draw_seed(seed)
    keep_prob = 1.0 - rate
    # `noise_shape` may contain `None`; resolve it against the input shape
    # before handing it to jax.
    concrete_shape = _get_concrete_noise_shape(inputs, noise_shape)
    keep_mask = jax.random.bernoulli(key, p=keep_prob, shape=concrete_shape)
    keep_mask = jax.numpy.broadcast_to(keep_mask, inputs.shape)
    scaled_inputs = inputs / keep_prob
    return jax.lax.select(
        keep_mask, scaled_inputs, jax.numpy.zeros_like(inputs)
    )
def shuffle(x, axis=0, seed=None):
    """Shuffle `x` along `axis`, permuting each slice independently."""
    return jax.random.permutation(
        jax_draw_seed(seed), x, axis, independent=True
    )
def gamma(shape, alpha, dtype=None, seed=None):
    """Draw Gamma-distributed samples with shape parameter `alpha`."""
    key = jax_draw_seed(seed)
    return jax.random.gamma(key, alpha, shape=shape, dtype=dtype or floatx())
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Draw binomial samples: `counts` trials with success probability
    `probabilities`."""
    key = jax_draw_seed(seed)
    # jax.random.binomial rejects plain Python lists, so coerce to arrays.
    trial_counts = jax.numpy.asarray(counts)
    success_probs = jax.numpy.asarray(probabilities)
    return jax.random.binomial(
        key=key,
        n=trial_counts,
        p=success_probs,
        shape=shape,
        dtype=dtype or floatx(),
    )
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Draw Beta-distributed samples with parameters `alpha` and `beta`."""
    key = jax_draw_seed(seed)
    # jax.random.beta rejects plain Python lists, so coerce to arrays.
    a_param = jax.numpy.asarray(alpha)
    b_param = jax.numpy.asarray(beta)
    return jax.random.beta(
        key=key, a=a_param, b=b_param, shape=shape, dtype=dtype or floatx()
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/__init__.py | keras/src/backend/jax/__init__.py | from keras.src.backend.config import is_nnx_enabled
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax import tensorboard
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import name_scope
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/distribution_lib_test.py | keras/src/backend/jax/distribution_lib_test.py | """Test for distribution_lib.py."""
import functools
import os
from unittest import mock
import jax
import numpy as np
import pytest
from jax.experimental import layout as jax_layout
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend import distribution_lib as backend_dlib
from keras.src.distribution import distribution_lib
# Expose 8 virtual CPU devices so the distribution tests below (which
# require exactly 8 devices) can run on a single host.
if backend.backend() == "jax":
    # Due to https://github.com/google/jax/issues/17188, we can't
    # override the XLA flag after the JAX back init. We have to
    # run this at top level to let JAX pick the flag value.
    xla_flags = os.getenv("XLA_FLAGS") or ""
    # Don't override user-specified device count, or other XLA flags.
    if "xla_force_host_platform_device_count" not in xla_flags:
        os.environ["XLA_FLAGS"] = (
            f"{xla_flags} --xla_force_host_platform_device_count=8"
        )
@pytest.mark.skipif(
    backend.backend() != "jax" or len(jax.devices()) != 8,
    reason="Backend specific test and requires 8 devices",
)
class JaxDistributionLibTest(testing.TestCase):
    """Tests for the JAX backend distribution utilities.

    Relies on the 8 virtual CPU devices configured via `XLA_FLAGS` at
    module import time (see the top-level setup above).
    """

    def _create_jax_layout(self, sharding):
        # Use jax_layout.Format or jax_layout.Layout if available.
        if hasattr(jax_layout, "Format"):
            return jax_layout.Format(sharding=sharding)
        elif hasattr(jax_layout, "Layout"):
            return jax_layout.Layout(sharding=sharding)
        return sharding

    def test_get_device_count(self):
        self.assertEqual(backend_dlib.get_device_count(), 8)
        self.assertEqual(backend_dlib.get_device_count("cpu"), 8)

    def test_list_devices(self):
        self.assertEqual(len(distribution_lib.list_devices()), 8)
        self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
        # NOTE(review): the next line duplicates the previous assertion —
        # possibly one of them was meant to check a different device type.
        self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)

    def test_device_conversion(self):
        devices = distribution_lib.list_devices("cpu")
        jax_devices = jax.devices("cpu")
        for d, jax_d in zip(devices, jax_devices):
            converted_jax_device = backend_dlib._to_backend_device(d)
            self.assertIsInstance(converted_jax_device, jax.Device)
            self.assertEqual(jax_d, converted_jax_device)

    @mock.patch.object(jax.distributed, "initialize", return_value=None)
    def test_initialize_with_all_job_addresses(self, mock_jax_initialize):
        backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 2, 0)
        mock_jax_initialize.assert_called_once_with(
            coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
        )

    def test_initialize_validate_job_and_process(self):
        with self.assertRaisesRegex(
            ValueError, "has 2 jobs, but num_processes is 3"
        ):
            backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 3, 0)

    @mock.patch.object(jax.distributed, "initialize", return_value=None)
    def test_initialize_with_coordinator_address(self, mock_jax_initialize):
        backend_dlib.initialize("10.0.0.1:1234", 2, 0)
        mock_jax_initialize.assert_called_once_with(
            coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
        )

    def test_distribute_tensor(self):
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        inputs = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = jax.sharding.NamedSharding(
            jax_mesh, jax.sharding.PartitionSpec("batch", None)
        )

        @functools.partial(jax.jit, static_argnames="target_layout")
        def test_function(inputs, target_layout):
            return distribution_lib.distribute_tensor(inputs, target_layout)

        result = test_function(inputs, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))

        # Test without jit
        result = distribution_lib.distribute_tensor(inputs, target_layout)
        self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))

    def test_distribute_variable(self):
        # This test only verify the single worker/process behavior.
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        variable = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = jax.sharding.NamedSharding(
            jax_mesh, jax.sharding.PartitionSpec("model", None)
        )

        result = backend_dlib.distribute_variable(variable, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))

    def test_distribute_input_data(self):
        # This test only verify the single worker/process behavior.
        # The multi-process test lives in g3.
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        input_data = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = jax.sharding.NamedSharding(
            jax_mesh, jax.sharding.PartitionSpec("batch", None)
        )

        result = backend_dlib.distribute_variable(input_data, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))

    def test_distribute_tensor_with_jax_layout(self):
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        inputs = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = self._create_jax_layout(
            sharding=jax.sharding.NamedSharding(
                jax_mesh, jax.sharding.PartitionSpec("batch", None)
            )
        )

        @functools.partial(jax.jit, static_argnames="target_layout")
        def test_function(inputs, target_layout):
            return distribution_lib.distribute_tensor(inputs, target_layout)

        result = test_function(inputs, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(
            result.sharding.is_equivalent_to(target_layout.sharding, ndim=2)
        )

        # Test without jit.
        result = distribution_lib.distribute_tensor(inputs, target_layout)
        self.assertTrue(
            result.sharding.is_equivalent_to(target_layout.sharding, ndim=2)
        )

    def test_distribute_variable_with_jax_layout(self):
        # This test only verify the single worker/process behavior.
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        variable = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = self._create_jax_layout(
            sharding=jax.sharding.NamedSharding(
                jax_mesh, jax.sharding.PartitionSpec("model", None)
            )
        )

        result = backend_dlib.distribute_variable(variable, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(
            result.sharding.is_equivalent_to(target_layout.sharding, ndim=2)
        )

    def test_distribute_input_data_with_jax_layout(self):
        # This test only verify the single worker/process behavior.
        jax_mesh = jax.sharding.Mesh(
            np.array(jax.devices()).reshape(2, 4), ("batch", "model")
        )

        input_data = jax.numpy.array(np.random.normal(size=(16, 8)))
        target_layout = self._create_jax_layout(
            sharding=jax.sharding.NamedSharding(
                jax_mesh, jax.sharding.PartitionSpec("batch", None)
            )
        )

        result = backend_dlib.distribute_variable(input_data, target_layout)
        # Note that the returned tensor has a different sharding implementation
        # which is GSPMDSharding, but it should be equivalent as the target
        # layout specified.
        self.assertTrue(
            result.sharding.is_equivalent_to(target_layout.sharding, ndim=2)
        )

    def test_processes(self):
        self.assertEqual(backend_dlib.process_id(), 0)
        self.assertEqual(backend_dlib.num_processes(), 1)

    def test_to_backend_mesh(self):
        devices = [f"cpu:{i}" for i in range(8)]
        shape = (4, 2)
        axis_names = ["batch", "model"]

        mesh = distribution_lib.DeviceMesh(shape, axis_names, devices)
        jax_mesh = backend_dlib._to_backend_mesh(mesh)

        self.assertIsInstance(jax_mesh, jax.sharding.Mesh)
        self.assertEqual(jax_mesh.devices.shape, shape)
        self.assertEqual(jax_mesh.axis_names, ("batch", "model"))

    def test_to_backend_layout(self):
        axes = ["data", None]
        mesh = distribution_lib.DeviceMesh(
            (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)]
        )
        layout = distribution_lib.TensorLayout(axes, mesh)
        jax_sharding = backend_dlib._to_backend_layout(layout)
        jax_mesh = backend_dlib._to_backend_mesh(mesh)
        self.assertEqual(
            jax_sharding,
            jax.sharding.NamedSharding(
                jax_mesh, jax.sharding.PartitionSpec("data", None)
            ),
        )

    def test_validation_for_device_mesh(self):
        axes = ["data", None]
        layout = distribution_lib.TensorLayout(axes, device_mesh=None)

        with self.assertRaisesRegex(
            ValueError, "Cannot create sharding when device mesh is not set"
        ):
            backend_dlib._to_backend_layout(layout)

    def test_variable_assignment_reuse_layout(self):
        shape = (4, 2)
        axis_names = ["batch", "model"]
        device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, backend_dlib.list_devices()
        )
        layout_map = distribution_lib.LayoutMap(device_mesh)
        layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
            [None, "model"]
        )
        layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])

        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="batch"
        )

        with distribution.scope():
            dense_layer = layers.Dense(8)
            dense_layer.build((16, 16))

        self.assertEqual(
            dense_layer.kernel._value.sharding.spec, (None, "model")
        )
        self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",))

        # Assign a numpy value to dense layer to mimic the model weight loading
        new_kernel = np.random.normal(size=(16, 8))
        new_bias = np.random.normal(size=(8))
        dense_layer.kernel.assign(new_kernel)
        dense_layer.bias.assign(new_bias)

        # Make sure the loaded value still use the layout when it is
        # initialized, even outside of the distribution scope.
        self.assertEqual(
            dense_layer.kernel._value.sharding.spec, (None, "model")
        )
        self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",))

    def test_e2e_data_parallel_model(self):
        distribution = distribution_lib.DataParallel(
            devices=backend_dlib.list_devices()
        )

        with distribution.scope():
            inputs = layers.Input(shape=[28, 28, 1])
            y = layers.Flatten()(inputs)
            y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
            y = layers.Dropout(0.4)(y)
            y = layers.Dense(units=10, activation="softmax")(y)
            model = models.Model(inputs=inputs, outputs=y)

        # Make sure all the weights are properly sharded.
        for weight in model.weights:
            self.assertTrue(weight._value.sharding.is_fully_replicated)

        inputs = np.random.normal(size=(32, 28, 28, 1))
        labels = np.random.normal(size=(32, 10))

        with distribution.scope():
            model.compile(loss="mse")
            model.fit(inputs, labels)

    def test_e2e_model_parallel_model(self):
        shape = (4, 2)
        axis_names = ["batch", "model"]
        device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, backend_dlib.list_devices()
        )

        layout_map = distribution_lib.LayoutMap(device_mesh)
        layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
            [None, "model"]
        )
        layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])

        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="batch"
        )
        with distribution.scope():
            inputs = layers.Input(shape=[28, 28, 1])
            y = layers.Flatten()(inputs)
            y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
            y = layers.Dropout(0.4)(y)
            y = layers.Dense(units=10, activation="softmax")(y)
            model = models.Model(inputs=inputs, outputs=y)

        for weight in model.weights:
            if "kernel" in weight.name:
                self.assertEqual(weight._value.sharding.spec, (None, "model"))
            elif "bias" in weight.name:
                self.assertEqual(weight._value.sharding.spec, ("model",))
            else:
                self.assertTrue(weight._value.sharding.is_fully_replicated)

        inputs = np.random.normal(size=(32, 28, 28, 1))
        labels = np.random.normal(size=(32, 10))

        with distribution.scope():
            model.compile(loss="mse")
            model.fit(inputs, labels)

    def test_e2e_model_parallel_with_output_sharding(self):
        shape = (4, 2)
        axis_names = ["batch", "model"]
        device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, backend_dlib.list_devices()
        )

        layout_map = distribution_lib.LayoutMap(device_mesh)
        layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
            [None, "model"]
        )
        layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])
        # Force the dense layer output to be batch parallel only, and not
        # sharded on model dimension.
        layout_map[".*dense.*output"] = ("batch", None)

        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="batch"
        )
        sharding_capture = ShardingCaptureLayer()
        with distribution.scope():
            inputs = layers.Input(shape=[28, 28, 1])
            y = layers.Flatten()(inputs)
            y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
            y = sharding_capture(y)
            y = layers.Dropout(0.4)(y)
            y = layers.Dense(units=10, activation="softmax")(y)
            model = models.Model(inputs=inputs, outputs=y)

        for weight in model.weights:
            if "kernel" in weight.name:
                self.assertEqual(weight._value.sharding.spec, (None, "model"))
            elif "bias" in weight.name:
                self.assertEqual(weight._value.sharding.spec, ("model",))
            else:
                self.assertTrue(weight._value.sharding.is_fully_replicated)

        inputs = np.random.normal(size=(32, 28, 28, 1))
        labels = np.random.normal(size=(32, 10))

        with distribution.scope():
            model.compile(loss="mse")
            model.fit(inputs, labels)

        # Note that the intermediate_tensor_layout is only captured during the
        # actual training, and not at the model building time.
        intermediate_tensor_layout = jax.sharding.NamedSharding(
            backend_dlib._to_backend_mesh(distribution.device_mesh),
            jax.sharding.PartitionSpec("batch", None),
        )
        self.assertTrue(
            sharding_capture.captured_input_sharding.is_equivalent_to(
                intermediate_tensor_layout, ndim=2
            )
        )

    def test_distribute_data_input(self):
        per_process_batch = jax.numpy.arange(24).reshape(
            6, 4
        )  # Example input array
        devices = jax.devices()[:4]  # Simulate 4 devices
        batch_dim_size, model_dim_size = 2, 2
        mesh = jax.sharding.Mesh(
            np.array(devices).reshape(batch_dim_size, model_dim_size),
            axis_names=["batch", "model"],
        )
        layout = jax.sharding.NamedSharding(
            mesh, jax.sharding.PartitionSpec("batch", None)
        )

        result = backend_dlib.distribute_data_input(
            per_process_batch, layout, "batch"
        )

        # Check the shape of the global batch array
        self.assertEqual(
            result.shape, (6, 4)
        )  # (per_replica_batch_size * num_model_replicas_total, 4)

        # Check the sharding of the global batch array
        self.assertEqual(len(result.addressable_shards), len(devices))
        # Since batch_dim_size=2, there are 2 model replicas so there is one
        # replication of data for model replica #1 and another replication of
        # data for model replica #2. Within each model replica, the data is
        # sharded to two shards. Therefore, each shard has 1/2 of
        # per_process_batch.
        for shard in result.addressable_shards:
            self.assertEqual(shard.data.shape, (3, 4))
class ShardingCaptureLayer(layers.Layer):
    """Identity layer that records the sharding of its input tensor."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        # Filled in by `capture_input_sharding` when the layer is called.
        self.captured_input_sharding = None

    def capture_input_sharding(self, sharding):
        self.captured_input_sharding = sharding

    def call(self, inputs):
        # `inspect_array_sharding` invokes the callback with the sharding
        # that the compiler chose for `inputs`; the layer itself is a no-op.
        jax.debug.inspect_array_sharding(
            inputs, callback=self.capture_input_sharding
        )
        return inputs
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/sparse.py | keras/src/backend/jax/sparse.py | import functools
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from keras.src.utils import jax_utils
def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims):
    """Turn the `axis` argument to the arguments needed by `broadcast_in_dim`.

    Args:
        axis: single int or a tuple of ints for the axis argument. The list of
            dimensions to reduce or insert.
        input_shape: the shape of the input as a tuple ints.
        insert_dims: `False` turns dimensions in `axis` to 1s (use case:
            reduction along `axis` with `keep_dims=True`). `True`, inserts 1s
            according to `axis` (use case: `expand_dims`).

    Returns:
        A tuple of three lists
        - The canonical value for `axis`: always a list, negative values have
          been resolved and values are sorted in ascending order.
        - The output shape: `input_shape` with 1s at the indices in `axis`,
          for use as the `shape` argument of `broadcast_in_dim`.
        - The broadcast dimensions: list of dimensions not in `axis`, for use
          as the `broadcast_dimensions` argument of `broadcast_in_dim`.
    """
    if axis is None:
        raise ValueError("Received `None` value for `axis`")
    axes = (axis,) if isinstance(axis, int) else axis
    # Reject literal duplicates before resolving negative values.
    if len(set(axes)) != len(axes):
        raise ValueError(f"Repeated axis in `axis`: {axes}")
    # When inserting, the result has more dimensions than the input.
    result_dims = len(input_shape) + (len(axes) if insert_dims else 0)
    canonical_axis = []
    for a in axes:
        if not -result_dims <= a < result_dims:
            raise ValueError(
                f"In `axis`, axis {a} is out of bounds for array "
                f"of dimension {result_dims}"
            )
        canonical_axis.append(a + result_dims if a < 0 else a)
    # Distinct inputs like -1 and result_dims - 1 can collide after
    # resolution, so check uniqueness again.
    if len(set(canonical_axis)) != len(canonical_axis):
        raise ValueError(f"Repeated axis in `axis`: {canonical_axis}")
    canonical_axis = sorted(canonical_axis)
    # Compute output shape: 1s inserted at (or substituted into) `axis`.
    output_shape = list(input_shape)
    if insert_dims:
        for i in canonical_axis:
            output_shape.insert(i, 1)
    else:
        for i in canonical_axis:
            output_shape[i] = 1
    broadcast_dims = [
        d for d in range(result_dims) if d not in canonical_axis
    ]
    return canonical_axis, output_shape, broadcast_dims
def bcoo_add_indices(x1, x2, sum_duplicates):
    """Add the indices of `x2` to `x1` with zero values.

    Args:
        x1: `BCOO` tensor to add indices to.
        x2: `BCOO` tensor to take the indices to add to x1.
        sum_duplicates: if `True` calls `bcoo_sum_duplicates` on the output.

    Returns:
        a `BCOO` tensor equal to `x1` but with extra zeros at indices in `x2`
        that were missing in `x1`.
    """
    # Zero-valued entries matching x2's stored elements, in x1's dtype.
    zeros_for_x2 = jnp.zeros(x2.data.shape, x1.data.dtype)
    # Concatenate along the nse axis, which sits just before the trailing
    # index dimension of `indices`.
    nse_axis = len(x1.indices.shape) - 2
    merged_indices = jnp.concatenate(
        [x1.indices, x2.indices], axis=nse_axis
    )
    merged_data = jnp.concatenate([x1.data, zeros_for_x2], axis=nse_axis)
    merged = jax_sparse.BCOO((merged_data, merged_indices), shape=x1.shape)
    return jax_sparse.bcoo_sum_duplicates(merged) if sum_duplicates else merged
def densifying_unary(func):
    """Decorator to add support for `JAXSparse` tensors (including `BCOO`) to
    a non-zero-preserving element-wise unary operator.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise
    - The operator must be unary (one input tensor and one output tensor)
    - The operator must return a tensor of the same shape.

    Additional arguments to the function (besides the input tensor) are
    supported. The returned result is a dense tensor.

    Args:
        func: The unary operator to wrap.

    Returns:
        Wrapped function that supports `JAXSparse` tensors.
    """

    @functools.wraps(func)
    def sparse_wrapper(x, *args, **kwargs):
        # `func` does not preserve zeros, so a sparse input is densified
        # before the call; dense inputs pass through untouched.
        dense_x = x.todense() if isinstance(x, jax_sparse.JAXSparse) else x
        return func(dense_x, *args, **kwargs)

    return sparse_wrapper
def elementwise_unary(linear):
    """Decorator to add support for `BCOO` sparse tensors to a zero-preserving
    element-wise unary operator.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise
    - The operator must be unary (one input tensor and one output tensor)
    - The operator must return a tensor of the same shape, and if it is a
      `BCOO` tensor, the indices of the result must be the same. Therefore:
      - Reduction operations are not supported (e.g. `mean`).
      - Operations for which the result may be dense (e.g. `reciprocal`), or
        the sparse indices depend on the inputs are not supported (e.g.
        `clip`). This implies that `func(0)` must be 0.

    Additional arguments to the function (besides the input tensor) are
    supported as long as they cannot change the indices of the result.

    Note that if an input sparse tensor contains zero values, the indices and
    the zero values are preserved.

    Args:
        linear: if `True`, means that the operation is such that
            `op(a + b) == op(a) + op(b)`.

    Returns:
        Wrapped function that supports `BCOO` sparse tensors.
    """

    def wrap_elementwise_unary(func):
        @functools.wraps(func)
        def sparse_wrapper(x, *args, **kwargs):
            if not isinstance(x, jax_sparse.BCOO):
                return func(x, *args, **kwargs)
            # Non-linear ops require unique indices: applying `func` to
            # duplicated entries and summing afterwards would differ from
            # summing first, so duplicates must be merged up front.
            if not (linear or x.unique_indices):
                x = jax_sparse.bcoo_sum_duplicates(x)
            # Apply `func` to the stored values only; indices are unchanged.
            return jax_sparse.BCOO(
                (func(x.data, *args, **kwargs), x.indices), shape=x.shape
            )

        return sparse_wrapper

    return wrap_elementwise_unary
def elementwise_binary_union(linear, use_sparsify):
    """Decorator to add support for `JAXSparse` tensors (including `BCOO`) to
    an element-wise binary operator such that the indices present in the
    result are the union of the indices of the two operands.

    The primary use case for this is the `add` and `subtract` operators.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise.
    - The operator must be binary (two input tensors and one output tensor).
    - Both inputs must be of the same shape or one input must be a scalar.
    - The output must be of the same shape as the (non scalar) inputs.
    - The indices of the output must be the union of the indices of the
      inputs. This implies that func(0, 0) must be 0. As a result, if one
      operand is dense or a scalar, then the result will be dense.

    Additional arguments to the function (besides the input tensors) are not
    supported.

    Note that if the result of the operation is zero at some indices,
    including because the operands were zero at these indices, the zeros and
    indices are preserved.

    The `BCOO` format is the only supported one in all cases. Other formats
    are not supported when `use_sparsify` is `False`.

    Args:
        linear: if `True`, means that the operation is such that
            `op(a + b, c) == op(a, c) + op(b, c)` and
            `op(a, c + d) == op(a, c) + op(a, d)`.
        use_sparsify: indicates that the JAX `sparsify` transform supports
            this operation.

    Returns:
        Wrapped function that supports `JAXSparse`.
    """

    def wrap_elementwise_binary_union(func):
        sparse_func = jax_sparse.sparsify(func) if use_sparsify else None

        @functools.wraps(func)
        def sparse_wrapper(x1, x2):
            if isinstance(x1, jax_sparse.JAXSparse):
                if isinstance(x2, jax_sparse.JAXSparse):
                    # x1 and x2 are sparse.
                    # The way we use `sparsify` it cannot know that the
                    # indices are the same, so we optimize this case here.
                    if (
                        x1.indices is x2.indices
                        and isinstance(x1, jax_sparse.BCOO)
                        and isinstance(x2, jax_sparse.BCOO)
                    ):
                        # Shared indices: apply func to `data` directly.
                        # Non-linear ops need unique indices first.
                        if not linear and not x1.unique_indices:
                            x1 = jax_sparse.bcoo_sum_duplicates(x1)
                            x2 = jax_sparse.bcoo_sum_duplicates(x2)
                        return jax_sparse.BCOO(
                            (func(x1.data, x2.data), x1.indices),
                            shape=x1.shape,
                            indices_sorted=x1.indices_sorted,
                            unique_indices=x1.unique_indices,
                        )
                    elif use_sparsify:
                        return sparse_func(x1, x2)
                    elif isinstance(x1, jax_sparse.BCOO) and isinstance(
                        x2, jax_sparse.BCOO
                    ):
                        # Extend both operands to the union of the indices,
                        # then operate on the aligned `data` arrays. Note
                        # that x2 is extended against the already-extended
                        # x1 on purpose.
                        x1 = bcoo_add_indices(
                            x1, x2, sum_duplicates=not linear
                        )
                        x2 = bcoo_add_indices(
                            x2, x1, sum_duplicates=not linear
                        )
                        return jax_sparse.BCOO(
                            (func(x1.data, x2.data), x1.indices),
                            shape=x1.shape,
                            indices_sorted=True,
                            unique_indices=True,
                        )
                    else:
                        # Bug fix: the `ValueError` was previously
                        # constructed but never raised, so unsupported
                        # sparse formats silently fell through to the dense
                        # `func(x1, x2)` call below with sparse operands.
                        raise ValueError(
                            "Unsupported sparse format: "
                            f"{x1.__class__} and {x2.__class__}"
                        )
                else:
                    # x1 is sparse, x2 is dense, densify x1.
                    x1 = x1.todense()
            elif isinstance(x2, jax_sparse.JAXSparse):
                # x1 is dense, x2 is sparse, densify x2.
                x2 = x2.todense()
            return func(x1, x2)

        return sparse_wrapper

    return wrap_elementwise_binary_union
def elementwise_division(func):
    """Decorator to add support for `BCOO` sparse tensors to element-wise
    binary division and related operators.

    This decorator is designed for operations related to the division of two
    operands (e.g. `divide`). It accepts `BCOO` tensors for both the
    dividend and the divisor, but handles them differently based on whether
    they are the dividend or the divisor.

    - If the divisor is sparse, it is densified and the result is dense
      because the result contains Inf or Nan outside of the indices of the
      dividend.
    - If the dividend is sparse and the divisor is dense, it finds
      occurrences of zeros and NaNs in the divisor. The result may therefore
      have more indices than there were in the dividend to return correct
      values where the divisor was zero or NaN.
    - If the dividend is sparse and the divisor is a scalar, it does the
      division element-wise. Note that the result is incorrectly sparse if
      the scalar divisor is zero.

    Args:
        func: The function to wrap.

    Returns:
        Wrapped function that supports `BCOO` sparse tensors.
    """
    sparse_func = jax_sparse.sparsify(func)

    @functools.wraps(func)
    def sparse_wrapper(x1, x2):
        if isinstance(x1, jax_sparse.JAXSparse):
            if isinstance(x2, jax_sparse.JAXSparse):
                # x1 is sparse and x2 is sparse.
                # Divisor is sparse, meaning we're doing divisions by zero
                # outside of x2.indices, so the result is dense. Densify both.
                x1 = x1.todense()
                x2 = x2.todense()
            elif isinstance(x1, jax_sparse.BCOO):
                if not hasattr(x2, "shape") or len(x2.shape) == 0:
                    # x1 is sparse BCOO, x2 is scalar, apply func element-wise.
                    return jax_sparse.BCOO(
                        (func(x1.data, x2), x1.indices),
                        shape=x1.shape,
                        indices_sorted=x1.indices_sorted,
                        unique_indices=x1.unique_indices,
                    )
                else:
                    # x1 is sparse BCOO, x2 is dense.
                    # The mask construction below inspects concrete values,
                    # so it is skipped while tracing (e.g. under `jit`).
                    if not jax_utils.is_in_jax_tracing_scope(x2):
                        # Find zeros and nans in x2 and add indices to x1.
                        # 1. Create a dense mask for zeros and nans.
                        x2_zeros_and_nans = jnp.equal(x2, 0)
                        if not jnp.issubdtype(x2.dtype, jnp.integer):
                            x2_zeros_and_nans = jnp.logical_or(
                                x2_zeros_and_nans, jnp.isnan(x2)
                            )
                        # 2. Make it a BCOO of True values.
                        x2_zeros_and_nans = jax_sparse.bcoo_fromdense(
                            x2_zeros_and_nans,
                            n_batch=x1.n_batch,
                            n_dense=x1.n_dense,
                            index_dtype=x1.indices.dtype,
                        )
                        # 3. Add the indices to x1.
                        x1 = bcoo_add_indices(
                            x1, x2_zeros_and_nans, sum_duplicates=True
                        )
                    return sparse_func(x1, x2)
            else:
                raise ValueError(f"Unsupported sparse format: {x1.__class__}")
        elif isinstance(x2, jax_sparse.JAXSparse):
            # x1 is dense, x2 is sparse, densify x2
            x2 = x2.todense()
        return func(x1, x2)

    return sparse_wrapper
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/math.py | keras/src/backend/jax/math.py | import math
import jax
import jax.numpy as jnp
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.utils.module_utils import scipy
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Compute per-segment sums of `data` grouped by `segment_ids`.

    Args:
        data: values to aggregate.
        segment_ids: segment index for each element of `data`.
        num_segments: total segment count; mandatory on the JAX backend.
        sorted: set to `True` when `segment_ids` are known to be sorted.

    Returns:
        Tensor of per-segment sums with leading dimension `num_segments`.

    Raises:
        ValueError: if `num_segments` is `None`.
    """
    if num_segments is None:
        raise ValueError(
            "Argument `num_segments` must be set when using the JAX backend. "
            "Received: num_segments=None"
        )
    summed = jax.ops.segment_sum(
        data, segment_ids, num_segments, indices_are_sorted=sorted
    )
    return summed
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Compute per-segment maxima of `data` grouped by `segment_ids`.

    Args:
        data: values to aggregate.
        segment_ids: segment index for each element of `data`.
        num_segments: total segment count; mandatory on the JAX backend.
        sorted: set to `True` when `segment_ids` are known to be sorted.

    Returns:
        Tensor of per-segment maxima with leading dimension `num_segments`.

    Raises:
        ValueError: if `num_segments` is `None`.
    """
    if num_segments is None:
        raise ValueError(
            "Argument `num_segments` must be set when using the JAX backend. "
            "Received: num_segments=None"
        )
    maxima = jax.ops.segment_max(
        data, segment_ids, num_segments, indices_are_sorted=sorted
    )
    return maxima
def top_k(x, k, sorted=True):
    """Return the `k` largest values and their indices along the last axis."""
    # JAX does not support `sorted=False`, but in the case where
    # `sorted=False`, order is not guaranteed, so OK to return sorted output.
    return jax.lax.top_k(x, k)
def in_top_k(targets, predictions, k):
    """Return whether each target label is among the top-`k` predictions.

    Args:
        targets: integer tensor of true class indices.
        predictions: tensor of scores with classes on the last axis.
        k: number of top entries to consider.

    Returns:
        Boolean tensor with one entry per target.
    """
    # Score of the labeled class for each sample, kept as (..., 1) so it
    # broadcasts against `predictions` below.
    target_scores = jnp.take_along_axis(
        predictions, jnp.expand_dims(targets, axis=-1), axis=-1
    )
    # A NaN score must not be treated as a large probability.
    target_scores = jnp.where(
        jnp.isnan(target_scores), -jnp.inf, target_scores
    )
    # The label is in the top-k iff fewer than k entries strictly beat it.
    strictly_better = jnp.sum(
        jnp.greater(predictions, target_scores), axis=-1
    )
    return jnp.less(strictly_better, k)
def logsumexp(x, axis=None, keepdims=False):
    """Compute `log(sum(exp(x)))` in a numerically stable way."""
    return jax.scipy.special.logsumexp(x, axis=axis, keepdims=keepdims)
def qr(x, mode="reduced"):
    """QR decomposition of `x`.

    Args:
        x: input tensor to factorize.
        mode: `"reduced"` or `"complete"`.

    Returns:
        Tuple `(q, r)` from `jnp.linalg.qr`.

    Raises:
        ValueError: for any other `mode`.
    """
    supported_modes = {"reduced", "complete"}
    if mode not in supported_modes:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return jnp.linalg.qr(x, mode=mode)
def extract_sequences(x, sequence_length, sequence_stride):
    """Extract sliding windows of length `sequence_length` from the last axis.

    Frames the signal into (possibly overlapping) sequences using a strided
    patch-extraction convolution; output shape is
    `(*batch_shape, num_sequences, sequence_length)`.
    """
    *batch_shape, signal_length = x.shape
    batch_shape = list(batch_shape)
    # Collapse all batch dims so the 1D patch extraction is applied once.
    x = jnp.reshape(x, (math.prod(batch_shape), signal_length, 1))
    x = jax.lax.conv_general_dilated_patches(
        x,
        (sequence_length,),
        (sequence_stride,),
        "VALID",
        dimension_numbers=("NTC", "OIT", "NTC"),
    )
    return jnp.reshape(x, (*batch_shape, *x.shape[-2:]))
def _get_complex_tensor_from_tuple(x):
    """Build one complex tensor from a `(real, imag)` pair.

    Args:
        x: tuple or list of exactly two float tensors of identical shape.

    Returns:
        A complex tensor combining the two parts.

    Raises:
        ValueError: if `x` is not a 2-tuple/list, if shapes differ, or if
            either part is not of a floating dtype.
    """
    if not isinstance(x, (tuple, list)) or len(x) != 2:
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary."
            f"Received: x={x}"
        )
    # `convert_to_tensor` does not support passing complex tensors. We
    # separate the input out into real and imaginary and convert them
    # separately.
    real, imag = x
    # Check shapes.
    if real.shape != imag.shape:
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary."
            "Both the real and imaginary parts should have the same shape. "
            f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
        )
    # Ensure dtype is float.
    both_float = jnp.issubdtype(real.dtype, jnp.floating) and jnp.issubdtype(
        imag.dtype, jnp.floating
    )
    if not both_float:
        raise ValueError(
            "At least one tensor in input `x` is not of type float."
            f"Received: x={x}."
        )
    return jax.lax.complex(real, imag)
def fft(x):
    """1D FFT of a complex input given as a `(real, imag)` tuple.

    Returns a `(real, imag)` tuple of the transformed tensor.
    """
    complex_input = _get_complex_tensor_from_tuple(x)
    complex_output = jnp.fft.fft(complex_input)
    return jnp.real(complex_output), jnp.imag(complex_output)
def fft2(x):
    """2D FFT of a complex input given as a `(real, imag)` tuple.

    Returns a `(real, imag)` tuple of the transformed tensor.
    """
    complex_input = _get_complex_tensor_from_tuple(x)
    complex_output = jnp.fft.fft2(complex_input)
    return jnp.real(complex_output), jnp.imag(complex_output)
def ifft2(x):
    """2D inverse FFT of a complex input given as a `(real, imag)` tuple.

    Returns a `(real, imag)` tuple of the transformed tensor.
    """
    complex_input = _get_complex_tensor_from_tuple(x)
    complex_output = jnp.fft.ifft2(complex_input)
    return jnp.real(complex_output), jnp.imag(complex_output)
def rfft(x, fft_length=None):
    """FFT of a real input along the last axis.

    Returns the half spectrum as a `(real, imag)` tuple.
    """
    spectrum = jnp.fft.rfft(x, n=fft_length, axis=-1, norm="backward")
    return jnp.real(spectrum), jnp.imag(spectrum)
def irfft(x, fft_length=None):
    """Inverse FFT of a half spectrum `(real, imag)` pair; returns a real
    tensor."""
    complex_input = _get_complex_tensor_from_tuple(x)
    return jnp.fft.irfft(complex_input, n=fft_length, axis=-1, norm="backward")
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-time Fourier transform of a real signal.

    Args:
        x: float32/float64 signal tensor, time on the last axis.
        sequence_length: length of each analysis frame.
        sequence_stride: hop between consecutive frames.
        fft_length: FFT size; must be >= `sequence_length`.
        window: `"hann"`, `"hamming"`, an explicit 1D window tensor of
            length `sequence_length`, or `None` for a rectangular window.
        center: whether to reflect-pad `x` by `fft_length // 2` on both
            sides so frames are centered on their timestamps.

    Returns:
        Tuple `(real, imag)` with shape `(..., num_sequences, fft_bins)`.

    Raises:
        TypeError: if `x` is not float32/float64.
        ValueError: for invalid `fft_length` or `window`.
    """
    if standardize_dtype(x.dtype) not in {"float32", "float64"}:
        raise TypeError(
            "Invalid input type. Expected `float32` or `float64`. "
            f"Received: input type={x.dtype}"
        )
    if fft_length < sequence_length:
        raise ValueError(
            "`fft_length` must equal or larger than `sequence_length`. "
            f"Received: sequence_length={sequence_length}, "
            f"fft_length={fft_length}"
        )
    if isinstance(window, str):
        if window not in {"hann", "hamming"}:
            raise ValueError(
                "If a string is passed to `window`, it must be one of "
                f'`"hann"`, `"hamming"`. Received: window={window}'
            )
    x = convert_to_tensor(x)
    if center:
        pad_width = [(0, 0) for _ in range(len(x.shape))]
        pad_width[-1] = (fft_length // 2, fft_length // 2)
        x = jnp.pad(x, pad_width, mode="reflect")
    # Center the window of size `sequence_length` within `fft_length`.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad
    if window is not None:
        if isinstance(window, str):
            win = convert_to_tensor(
                scipy.signal.get_window(window, sequence_length), dtype=x.dtype
            )
        else:
            win = convert_to_tensor(window, dtype=x.dtype)
            if len(win.shape) != 1 or win.shape[-1] != sequence_length:
                raise ValueError(
                    "The shape of `window` must be equal to [sequence_length]."
                    f"Received: window shape={win.shape}"
                )
        win = jnp.pad(win, [[l_pad, r_pad]])
    else:
        win = jnp.ones((sequence_length + l_pad + r_pad), dtype=x.dtype)
    result = jax.scipy.signal.stft(
        x,
        fs=1.0,
        window=win,
        nperseg=(sequence_length + l_pad + r_pad),
        noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
        nfft=fft_length,
        boundary=None,
        padded=False,
    )[-1]
    # scale and swap to (..., num_sequences, fft_bins)
    # `jax.scipy.signal.stft` normalizes by the window sum; dividing by this
    # scale undoes that normalization.
    scale = jnp.sqrt(1.0 / win.sum() ** 2)
    result = result / scale
    result = jnp.swapaxes(result, -2, -1)
    return jnp.real(result), jnp.imag(result)
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse short-time Fourier transform.

    Args:
        x: `(real, imag)` pair with shape `(..., num_sequences, fft_bins)`;
            must have at least 2 dimensions.
        sequence_length: length of each analysis frame.
        sequence_stride: hop between consecutive frames.
        fft_length: FFT size used by the forward transform.
        length: optional output length to which the result is trimmed.
        window: `"hann"`, `"hamming"`, an explicit 1D window tensor of
            length `sequence_length`, or `None`.
        center: whether the forward transform was centered; controls how
            much padding is stripped from the result.

    Returns:
        The reconstructed real signal.

    Raises:
        ValueError: for rank < 2 input or an invalid `window`.
    """
    x = _get_complex_tensor_from_tuple(x)
    dtype = jnp.real(x).dtype
    if len(x.shape) < 2:
        raise ValueError(
            f"Input `x` must have at least 2 dimensions. "
            f"Received shape: {x.shape}"
        )
    expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
    # Center the window of size `sequence_length` within `fft_length`,
    # mirroring the padding done in `stft`.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad
    if window is not None:
        if isinstance(window, str):
            win = convert_to_tensor(
                scipy.signal.get_window(window, sequence_length), dtype=dtype
            )
        else:
            win = convert_to_tensor(window, dtype=dtype)
            if len(win.shape) != 1 or win.shape[-1] != sequence_length:
                raise ValueError(
                    "The shape of `window` must be equal to [sequence_length]."
                    f"Received: window shape={win.shape}"
                )
        win = jnp.pad(win, [[l_pad, r_pad]])
    else:
        win = jnp.ones((sequence_length + l_pad + r_pad), dtype=dtype)
    x = jax.scipy.signal.istft(
        x,
        fs=1.0,
        window=win,
        nperseg=(sequence_length + l_pad + r_pad),
        noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
        nfft=fft_length,
        boundary=False,
        time_axis=-2,
        freq_axis=-1,
    )[-1]
    # scale
    x = x / win.sum() if window is not None else x / sequence_stride
    # Strip the centering padding and/or trim to the requested length.
    start = 0 if center is False else fft_length // 2
    if length is not None:
        end = start + length
    elif center is True:
        end = -(fft_length // 2)
    else:
        end = expected_output_len
    return x[..., start:end]
def rsqrt(x):
    """Element-wise reciprocal square root, `1 / sqrt(x)`."""
    return jax.lax.rsqrt(x)
def erf(x):
    """Element-wise Gauss error function."""
    return jax.lax.erf(x)
def erfinv(x):
    """Element-wise inverse of the Gauss error function."""
    return jax.lax.erf_inv(x)
def solve(a, b):
    """Solve the linear system `a x = b` via `jnp.linalg.solve`."""
    a = convert_to_tensor(a)
    b = convert_to_tensor(b)
    return jnp.linalg.solve(a, b)
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm of `x`, computed in a float dtype."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        # int64 is mapped to the configured default float type rather than
        # through `result_type` — presumably to avoid forcing float64;
        # verify against the other backends' `norm` implementations.
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
def logdet(x):
    """Natural log of the absolute determinant of `x`, via `slogdet`."""
    from keras.src.backend.jax.numpy import slogdet

    # In JAX (like in NumPy) slogdet is more stable than
    # `np.log(np.linalg.det(x))`. See
    # https://numpy.org/doc/stable/reference/generated/numpy.linalg.slogdet.html
    return slogdet(x)[1]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/jax/core_test.py | keras/src/backend/jax/core_test.py | import os
import jax
import jax.numpy as jnp
import numpy as np
import pytest
import keras
from keras.src import backend
from keras.src import testing
from keras.src.backend.config import is_nnx_enabled
if is_nnx_enabled():
from flax import nnx
from keras.src.backend.jax.core import NnxVariable
@pytest.mark.skipif(
    backend.backend() != "jax",
    reason="JAX backend specific test for core Variable integration with NNX.",
)
@pytest.mark.skipif(
    not is_nnx_enabled(),
    reason="Test requires NNX backend to be enabled by default for setup.",
)
class NnxVariableTest(testing.TestCase):
    """Tests for Keras `Variable` integration with Flax NNX on JAX."""

    def setUp(self):
        # Bug fix: this hook was named `setup`, which the unittest/pytest
        # machinery never calls for `unittest.TestCase` subclasses, so the
        # fixtures below were never created (and `super().setup()` would
        # have raised). `setUp` is the correct hook name.
        super().setUp()

        class NNXModel(nnx.Module):
            def __init__(self, rngs):
                self.linear = nnx.Linear(2, 3, rngs=rngs)
                # Use NnxVariable directly as KerasJaxVariable
                # might be JaxVariable if NNX is disabled globally.
                self.custom_variable = NnxVariable(jnp.ones((1, 3)))

            def __call__(self, x):
                return self.linear(x) + self.custom_variable

        self.nnx_model = NNXModel(rngs=nnx.Rngs(0))
        self.keras_nnx_model = keras.Sequential(
            [keras.layers.Dense(units=1, input_shape=(10,))]
        )
        self.single_dummy_input = np.random.rand(1, 10)

    def test_variable_in_nnx_module(self):
        # An NnxVariable embedded in an nnx.Module must carry NNX trace
        # state and behave as an nnx.Variable.
        self.assertTrue(hasattr(self.nnx_model.custom_variable, "_trace_state"))
        self.assertIsNotNone(self.nnx_model.custom_variable._trace_state)
        self.assertAllEqual(self.nnx_model.custom_variable.value, [[1, 1, 1]])
        self.assertTrue(
            isinstance(self.nnx_model.custom_variable, nnx.Variable)
        )

    def test_model_saving(self):
        # Round-trip a Keras model through save/load and compare outputs.
        path = os.path.join(self.get_temp_dir(), "model.keras")
        original_outputs = self.keras_nnx_model(self.single_dummy_input)
        # NOTE(review): `save_format` is a legacy argument in Keras 3 —
        # confirm it is still accepted alongside a `.keras` path.
        self.keras_nnx_model.save(path, save_format="keras_v3")
        restored_model = keras.models.load_model(path)
        restored_outputs = restored_model(self.single_dummy_input)
        self.assertAllEqual(original_outputs, restored_outputs)

    def test_keras_variable_nnx_split_merge_sync(self):
        # A Keras Variable must survive nnx.split / state transform /
        # nnx.merge with its cached `_value` kept in sync.
        variable1 = keras.Variable(jnp.array(1.0))
        graphdef, state = nnx.split(variable1)
        state = jax.tree.map(lambda x: x + 1, state)
        variable2 = nnx.merge(graphdef, state)
        self.assertEqual(variable2._value, variable2.value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tests/compute_output_spec_test.py | keras/src/backend/tests/compute_output_spec_test.py | import unittest
import pytest
from keras.src import backend
from keras.src import ops
from keras.src.backend.common.keras_tensor import KerasTensor
def single_arg_test_fn(x):
    """Toy fn: concatenates `(x + 1) ** 2` with `x` on the last axis,
    doubling the last dimension."""
    return ops.concatenate([(x + 1) ** 2, x], axis=-1)
def three_args_2_kwarg_test_fn(x1, x2, x3=None):
    """Toy fn: reduces axis 1 of each input with `max` and sums the
    results, dropping that axis from the output shape."""
    x1 = ops.max(x1, axis=1)
    x2 = ops.max(x2, axis=1)
    if x3 is not None:
        x1 += ops.max(x3, axis=1)
    return x1 + x2
class ComputeOutputSpecTest(unittest.TestCase):
    """Tests for `backend.compute_output_spec` shape and sparseness
    inference over symbolic `KerasTensor` inputs."""

    def test_dynamic_batch_size(self):
        # A dynamic (None) batch dim must propagate through the traced fn.
        x = KerasTensor(shape=(None, 3, 5))
        y = backend.compute_output_spec(single_arg_test_fn, x)
        self.assertEqual(y.shape, (None, 3, 10))
        x1 = KerasTensor(shape=(None, 3, 5))
        x2 = KerasTensor(shape=(None, 3, 5))
        x3 = KerasTensor(shape=(None, 3, 5))
        y = backend.compute_output_spec(
            three_args_2_kwarg_test_fn, x1, x2, x3=x3
        )
        self.assertEqual(y.shape, (None, 5))

    def test_dynamic_everything(self):
        # Dynamic dims other than the batch dim must propagate too.
        x = KerasTensor(shape=(2, None, 3))
        y = backend.compute_output_spec(single_arg_test_fn, x)
        self.assertEqual(y.shape, (2, None, 6))
        x1 = KerasTensor(shape=(None, None, 5))
        x2 = KerasTensor(shape=(None, None, 5))
        x3 = KerasTensor(shape=(None, None, 5))
        y = backend.compute_output_spec(
            three_args_2_kwarg_test_fn, x1, x2, x3=x3
        )
        self.assertEqual(y.shape, (None, 5))

    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_sparse_to_sparse(self):
        # Ops that preserve sparsity must report sparse output specs.
        def single_arg_sparse_fn(x):
            y0 = ops.transpose(x, axes=(0, 2, 1))
            y1 = ops.squeeze(ops.expand_dims(x, axis=3), axis=3)
            return (y0, y1)

        x = KerasTensor(shape=(None, 3, 3), sparse=True)
        ys = backend.compute_output_spec(single_arg_sparse_fn, x)
        for y in ys:
            self.assertEqual(y.shape, (None, 3, 3))
            self.assertTrue(y.sparse)

        def three_args_sparse_fn(x1, x2, x3=None):
            y0 = ops.add(x1, x2)  # sparse, sparse
            y1 = ops.divide(x1, x3)  # sparse, dense
            y2 = ops.matmul(x1, x2)  # sparse, sparse
            y3 = ops.multiply(x1, x3)  # sparse, dense
            return (y0, y1, y2, y3)

        x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
        x2 = KerasTensor(shape=(None, 3, 3), sparse=True)
        x3 = KerasTensor(shape=(None, 3, 3), sparse=False)
        ys = backend.compute_output_spec(three_args_sparse_fn, x1, x2, x3=x3)
        for y in ys:
            self.assertEqual(y.shape, (None, 3, 3))
            self.assertTrue(y.sparse)

    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_sparse_to_dense(self):
        # Ops that densify their result must report dense output specs.
        def single_arg_dense_fn(x):
            y0 = ops.exp(x)
            return (y0,)

        x = KerasTensor(shape=(None, 3, 3), sparse=True)
        ys = backend.compute_output_spec(single_arg_dense_fn, x)
        for y in ys:
            self.assertEqual(y.shape, (None, 3, 3))
            self.assertFalse(y.sparse)

        def three_args_dense_fn(x1, x2, x3=None):
            y0 = ops.add(x1, x2)  # sparse, dense
            y1 = ops.add(x2, x1)  # dense, sparse
            y2 = ops.concatenate([x1, x2], axis=0)  # sparse, dense
            y3 = ops.matmul(x1, x2)  # sparse, dense
            y4 = ops.matmul(x2, x1)  # dense, sparse
            y5 = ops.take(x2, indices=x3, axis=1)  # dense, sparse
            y6 = ops.divide(x1, x1)  # sparse, sparse
            return (y0, y1, y2, y3, y4, y5, y6)

        x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
        x2 = KerasTensor(shape=(None, 3, 3), sparse=False)
        x3 = KerasTensor(shape=(3,), dtype="int64", sparse=True)
        ys = backend.compute_output_spec(three_args_dense_fn, x1, x2, x3=x3)
        for y in ys:
            self.assertEqual(y.shape, (None, 3, 3))
            self.assertFalse(y.sparse)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tests/device_scope_test.py | keras/src/backend/tests/device_scope_test.py | import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
    """Tests for `backend.device` scopes on each backend.

    Each test is gated on the active backend via skipif; GPU assertions are
    skipped when no GPU is available.
    """

    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_tf_device_scope(self):
        import tensorflow as tf

        if not tf.config.list_physical_devices("GPU"):
            self.skipTest("Need at least one GPU for testing")
        # Device names are case-insensitive on entry but normalized by TF.
        with backend.device("cpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertIn("CPU:0", t.device)
        with backend.device("CPU:0"):
            t = backend.numpy.ones((2, 1))
            self.assertIn("CPU:0", t.device)
        # When leaving the scope, the device should be back with gpu:0
        t = backend.numpy.ones((2, 1))
        self.assertIn("GPU:0", t.device)
        # Also verify the explicit gpu device
        with backend.device("gpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertIn("GPU:0", t.device)

    @pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
    def test_jax_device_scope(self):
        import jax

        def get_device(t):
            # After updating to Jax 0.4.33, Directly access via t.device attr.
            return list(t.devices())[0]

        platform = jax.default_backend()
        if platform != "gpu":
            self.skipTest("Need at least one GPU for testing")
        with backend.device("cpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(get_device(t), jax.devices("cpu")[0])
        with backend.device("CPU:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(get_device(t), jax.devices("cpu")[0])
        # When leaving the scope, the device should be back with gpu:0
        t = backend.numpy.ones((2, 1))
        self.assertEqual(get_device(t), jax.devices("gpu")[0])
        # Also verify the explicit gpu device
        with backend.device("gpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(get_device(t), jax.devices("gpu")[0])

    @pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
    def test_invalid_jax_device(self):
        # Non-string device names must be rejected.
        with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
            backend.device(123).__enter__()

    @pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
    def test_torch_device_scope(self):
        import torch

        with backend.device("cpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(t.device, torch.device("cpu"))
        with backend.device("CPU:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(t.device, torch.device("cpu"))
        # Need at least one GPU for the following testing.
        if not torch.cuda.is_available():
            return
        # When leaving the scope, the device should be back with gpu:0
        t = backend.numpy.ones((2, 1))
        self.assertEqual(t.device, torch.device("cuda", 0))
        # Also verify the explicit gpu -> cuda conversion
        with backend.device("gpu:0"):
            t = backend.numpy.ones((2, 1))
            self.assertEqual(t.device, torch.device("cuda", 0))

    @pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
    def test_invalid_torch_device(self):
        # Non-string device names must be rejected.
        with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
            backend.device(123).__enter__()

    @pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
    def test_torch_meta_device(self):
        import torch

        # Tensors created on the meta device must be converted onto a real
        # device (CPU, or CUDA when available).
        with torch.device("meta"):
            x = torch.ones(5)
        t = backend.convert_to_tensor(x)
        if not torch.cuda.is_available():
            self.assertEqual(t.device, torch.device("cpu"))
        else:
            self.assertEqual(t.device, torch.device("cuda", 0))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/image.py | keras/src/backend/tensorflow/image.py | import functools
import itertools
import operator
import numpy as np
import tensorflow as tf
from keras.src import backend
from keras.src.backend.tensorflow.core import convert_to_tensor
from keras.src.backend.tensorflow.numpy import moveaxis
from keras.src.random.seed_generator import draw_seed
# Interpolation methods accepted by `resize`.
RESIZE_INTERPOLATIONS = (
    "bilinear",
    "nearest",
    "lanczos3",
    "lanczos5",
    "bicubic",
    "area",
)
# Interpolation methods accepted by `affine_transform`.
AFFINE_TRANSFORM_INTERPOLATIONS = (
    "nearest",
    "bilinear",
)
# Fill modes accepted by `affine_transform`.
AFFINE_TRANSFORM_FILL_MODES = (
    "constant",
    "nearest",
    "wrap",
    # "mirror", not supported by TF
    "reflect",
)
# Fill modes for coordinate mapping — presumably consumed by a
# `map_coordinates` implementation outside this chunk; verify in the file.
MAP_COORDINATES_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Methods for scale-and-translate resizing — presumably consumed outside
# this chunk; verify in the file.
SCALE_AND_TRANSLATE_METHODS = {
    "linear",
    "bilinear",
    "trilinear",
    "cubic",
    "bicubic",
    "tricubic",
    "lanczos3",
    "lanczos5",
}
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to single-channel grayscale.

    Args:
        images: rank-3 (single image) or rank-4 (batch) tensor with the RGB
            channel axis placed according to `data_format`.
        data_format: `"channels_last"` or `"channels_first"`; defaults to
            the backend-standardized value.

    Returns:
        Tensor of the same rank and dtype as `images` with the channel axis
        reduced to size 1.

    Raises:
        ValueError: if `images` is not rank 3 or 4.
    """
    images = convert_to_tensor(images)
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # Convert to floats
    original_dtype = images.dtype
    compute_dtype = backend.result_type(images.dtype, float)
    images = tf.cast(images, compute_dtype)
    # Ref: tf.image.rgb_to_grayscale
    # Standard luma weights (ITU-R BT.601).
    rgb_weights = convert_to_tensor(
        [0.2989, 0.5870, 0.1140], dtype=images.dtype
    )
    images = tf.tensordot(images, rgb_weights, axes=(channels_axis, -1))
    images = tf.expand_dims(images, axis=channels_axis)
    return tf.cast(images, original_dtype)
def rgb_to_hsv(images, data_format=None):
    """Convert RGB images to HSV via `tf.image.rgb_to_hsv`.

    Accepts rank-3 or rank-4 float tensors. `channels_first` inputs are
    transposed to `channels_last` for the TF op and transposed back.

    Raises:
        ValueError: if rank is not 3/4 or dtype is not floating point.
    """
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    if data_format == "channels_first":
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 2, 3, 1))
        else:
            images = tf.transpose(images, (1, 2, 0))
    images = tf.image.rgb_to_hsv(images)
    if data_format == "channels_first":
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 3, 1, 2))
        elif len(images.shape) == 3:
            images = tf.transpose(images, (2, 0, 1))
    return images
def hsv_to_rgb(images, data_format=None):
    """Convert HSV images to RGB via `tf.image.hsv_to_rgb`.

    Accepts rank-3 or rank-4 float tensors. `channels_first` inputs are
    transposed to `channels_last` for the TF op and transposed back.

    Raises:
        ValueError: if rank is not 3/4 or dtype is not floating point.
    """
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    if data_format == "channels_first":
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 2, 3, 1))
        else:
            images = tf.transpose(images, (1, 2, 0))
    images = tf.image.hsv_to_rgb(images)
    if data_format == "channels_first":
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 3, 1, 2))
        elif len(images.shape) == 3:
            images = tf.transpose(images, (2, 0, 1))
    return images
def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize images to `size`, optionally cropping or padding to preserve
    aspect ratio.

    Args:
        images: rank-3 (single image) or rank-4 (batch) tensor.
        size: `(target_height, target_width)` tuple.
        interpolation: one of `RESIZE_INTERPOLATIONS`.
        antialias: forwarded to `tf.image.resize`.
        crop_to_aspect_ratio: center-crop first so the target aspect ratio
            is matched without distortion. Mutually exclusive with
            `pad_to_aspect_ratio`.
        pad_to_aspect_ratio: center-pad with `fill_value` first so the
            target aspect ratio is matched without distortion.
        fill_mode: only `"constant"` is supported.
        fill_value: padding value used with `pad_to_aspect_ratio`.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        The resized tensor, in the layout given by `data_format`.

    Raises:
        ValueError: for invalid `interpolation`, `fill_mode`, `size`, rank,
            or if both crop and pad flags are set.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in RESIZE_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
        )
    if fill_mode != "constant":
        raise ValueError(
            "Invalid value for argument `fill_mode`. Only `'constant'` "
            f"is supported. Received: fill_mode={fill_mode}"
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if not len(size) == 2:
        raise ValueError(
            "Argument `size` must be a tuple of two elements "
            f"(height, width). Received: size={size}"
        )
    size = tuple(size)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # All processing below assumes channels_last; transpose back at the end.
    if data_format == "channels_first":
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 2, 3, 1))
        else:
            images = tf.transpose(images, (1, 2, 0))

    if crop_to_aspect_ratio:
        # Compute the largest centered crop with the target aspect ratio.
        shape = tf.shape(images)
        height, width = shape[-3], shape[-2]
        target_height, target_width = size
        crop_height = tf.cast(
            tf.cast(width * target_height, "float32") / target_width,
            "int32",
        )
        crop_height = tf.maximum(tf.minimum(height, crop_height), 1)
        crop_height = tf.cast(crop_height, "int32")
        crop_width = tf.cast(
            tf.cast(height * target_width, "float32") / target_height,
            "int32",
        )
        crop_width = tf.maximum(tf.minimum(width, crop_width), 1)
        crop_width = tf.cast(crop_width, "int32")
        crop_box_hstart = tf.cast(
            tf.cast(height - crop_height, "float32") / 2, "int32"
        )
        crop_box_wstart = tf.cast(
            tf.cast(width - crop_width, "float32") / 2, "int32"
        )
        if len(images.shape) == 4:
            images = images[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
        else:
            images = images[
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
    elif pad_to_aspect_ratio:
        # Compute the smallest centered padding reaching the target aspect
        # ratio; `tf.cond` keeps this graph-compatible with dynamic shapes.
        # NOTE(review): both sides are padded by `img_box_hstart`
        # (= (pad - orig) // 2), so for odd padding amounts the total is one
        # pixel short of `pad_height`/`pad_width` — confirm this is intended
        # (the subsequent resize hides the discrepancy).
        shape = tf.shape(images)
        height, width = shape[-3], shape[-2]
        target_height, target_width = size
        pad_height = tf.cast(
            tf.cast(width * target_height, "float32") / target_width,
            "int32",
        )
        pad_height = tf.maximum(height, pad_height)
        pad_height = tf.cast(pad_height, "int32")
        pad_width = tf.cast(
            tf.cast(height * target_width, "float32") / target_height,
            "int32",
        )
        pad_width = tf.maximum(width, pad_width)
        pad_width = tf.cast(pad_width, "int32")
        img_box_hstart = tf.cast(
            tf.cast(pad_height - height, "float32") / 2, "int32"
        )
        img_box_wstart = tf.cast(
            tf.cast(pad_width - width, "float32") / 2, "int32"
        )
        if len(images.shape) == 4:
            batch_size = tf.shape(images)[0]
            channels = tf.shape(images)[3]
            padded_img = tf.cond(
                img_box_hstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (batch_size, img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        images,
                        tf.ones(
                            (batch_size, img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=1,
                ),
                lambda: images,
            )
            padded_img = tf.cond(
                img_box_wstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (batch_size, height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        padded_img,
                        tf.ones(
                            (batch_size, height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=2,
                ),
                lambda: padded_img,
            )
        else:
            channels = tf.shape(images)[2]
            padded_img = tf.cond(
                img_box_hstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        images,
                        tf.ones(
                            (img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=0,
                ),
                lambda: images,
            )
            padded_img = tf.cond(
                img_box_wstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        padded_img,
                        tf.ones(
                            (height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=1,
                ),
                lambda: padded_img,
            )
        images = padded_img

    resized = tf.image.resize(
        images, size, method=interpolation, antialias=antialias
    )
    if data_format == "channels_first":
        if len(images.shape) == 4:
            resized = tf.transpose(resized, (0, 3, 1, 2))
        elif len(images.shape) == 3:
            resized = tf.transpose(resized, (2, 0, 1))
    return resized
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Apply projective/affine transform(s) to image(s).

    Args:
        images: rank-3 (single image) or rank-4 (batch of images) tensor.
        transform: rank-1 (single transform) or rank-2 (batch of transforms)
            tensor of 8 coefficients per image, in the layout expected by
            `tf.raw_ops.ImageProjectiveTransformV3`.
        interpolation: one of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_mode: one of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: value used outside the image for `"constant"` fill mode.
        data_format: `"channels_last"` or `"channels_first"`; `None` resolves
            to the backend default.

    Returns:
        The transformed images, with the same rank and data format as the
        input.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if len(transform.shape) not in (1, 2):
        raise ValueError(
            "Invalid transform rank: expected rank 1 (single transform) "
            "or rank 2 (batch of transforms). Received input with shape: "
            f"transform.shape={transform.shape}"
        )
    # Promote unbatched inputs to batched form for the raw op.
    unbatched = len(images.shape) == 3
    if unbatched:
        images = tf.expand_dims(images, axis=0)
    if len(transform.shape) == 1:
        transform = tf.expand_dims(transform, axis=0)
    # The raw op works in channels_last layout only.
    if data_format == "channels_first":
        images = tf.transpose(images, (0, 2, 3, 1))
    affined = tf.raw_ops.ImageProjectiveTransformV3(
        images=images,
        transforms=tf.cast(transform, dtype=tf.float32),
        output_shape=tf.shape(images)[1:-1],
        fill_value=fill_value,
        interpolation=interpolation.upper(),
        fill_mode=fill_mode.upper(),
    )
    # The raw op drops static shape information; restore it.
    affined = tf.ensure_shape(affined, images.shape)
    if data_format == "channels_first":
        affined = tf.transpose(affined, (0, 3, 1, 2))
    if unbatched:
        affined = tf.squeeze(affined, axis=0)
    return affined
def perspective_transform(
    images,
    start_points,
    end_points,
    interpolation="bilinear",
    fill_value=0,
    data_format=None,
):
    """Warp image(s) with the homography mapping `start_points` to `end_points`.

    Args:
        images: rank-3 (single image) or rank-4 (batch of images) tensor.
        start_points: source corner coordinates, shape `(4, 2)` or `(N, 4, 2)`.
        end_points: destination corner coordinates, same shape as
            `start_points`.
        interpolation: one of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_value: value used for pixels mapped from outside the input.
        data_format: `"channels_last"` or `"channels_first"`; `None` resolves
            to the backend default.

    Returns:
        The warped images, with the same rank and data format as the input.
    """
    data_format = backend.standardize_data_format(data_format)
    start_points = convert_to_tensor(start_points, dtype=tf.float32)
    end_points = convert_to_tensor(end_points, dtype=tf.float32)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
            f"interpolation={interpolation}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if start_points.shape.rank not in (2, 3) or start_points.shape[-2:] != (
        4,
        2,
    ):
        raise ValueError(
            "Invalid start_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {start_points.shape}"
        )
    if end_points.shape.rank not in (2, 3) or end_points.shape[-2:] != (4, 2):
        raise ValueError(
            "Invalid end_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {end_points.shape}"
        )
    if start_points.shape != end_points.shape:
        raise ValueError(
            "start_points and end_points must have the same shape."
            f" Received start_points.shape={start_points.shape}, "
            f"end_points.shape={end_points.shape}"
        )
    # Promote unbatched inputs to batched form for the raw op.
    need_squeeze = False
    if len(images.shape) == 3:
        images = tf.expand_dims(images, axis=0)
        need_squeeze = True
    if len(start_points.shape) == 2:
        start_points = tf.expand_dims(start_points, axis=0)
    if len(end_points.shape) == 2:
        end_points = tf.expand_dims(end_points, axis=0)
    # The raw op works in channels_last layout only.
    if data_format == "channels_first":
        images = tf.transpose(images, (0, 2, 3, 1))
    transform = compute_homography_matrix(start_points, end_points)
    if len(transform.shape) == 1:
        transform = tf.expand_dims(transform, axis=0)
    # NOTE(review): unlike `affine_transform`, no `fill_mode` is passed here,
    # so the raw op falls back to its own default fill mode — confirm that is
    # the intended behavior for perspective transforms.
    output = tf.raw_ops.ImageProjectiveTransformV3(
        images=images,
        transforms=tf.cast(transform, dtype=tf.float32),
        output_shape=tf.shape(images)[1:-1],
        fill_value=fill_value,
        interpolation=interpolation.upper(),
    )
    # The raw op drops static shape information; restore it.
    output = tf.ensure_shape(output, images.shape)
    if data_format == "channels_first":
        output = tf.transpose(output, (0, 3, 1, 2))
    if need_squeeze:
        output = tf.squeeze(output, axis=0)
    return output
def compute_homography_matrix(start_points, end_points):
    """Solve for the 8 homography coefficients per batch element.

    Builds the standard direct-linear-transform system — two equations per
    point correspondence, eight unknowns — and solves it with
    `tf.linalg.solve`.

    Args:
        start_points: float tensor of shape `(N, 4, 2)`, source corners.
        end_points: float tensor of shape `(N, 4, 2)`, destination corners.

    Returns:
        Float tensor of shape `(N, 8)` with the flattened homography
        parameters.
    """
    rows = []
    targets = []
    for point in range(4):
        sx = start_points[:, point, 0]
        sy = start_points[:, point, 1]
        ex = end_points[:, point, 0]
        ey = end_points[:, point, 1]
        ones = tf.ones_like(ex)
        zeros = tf.zeros_like(ex)
        # Equation constraining the x-coordinate of this correspondence.
        rows.append(
            tf.stack(
                [ex, ey, ones, zeros, zeros, zeros, -sx * ex, -sx * ey],
                axis=-1,
            )
        )
        # Equation constraining the y-coordinate of this correspondence.
        rows.append(
            tf.stack(
                [zeros, zeros, zeros, ex, ey, ones, -sy * ex, -sy * ey],
                axis=-1,
            )
        )
        targets.extend([sx, sy])
    # (N, 8, 8) system matrix and (N, 8, 1) right-hand side.
    coefficient_matrix = tf.stack(rows, axis=1)
    target_vector = tf.expand_dims(tf.stack(targets, axis=-1), axis=-1)
    homography_matrix = tf.linalg.solve(coefficient_matrix, target_vector)
    return tf.reshape(homography_matrix, [-1, 8])
def _mirror_index_fixer(index, size):
    """Fold out-of-range indices back into `[0, size)` by mirroring."""
    half_period = size - 1  # Half-wavelength of the triangular wave.
    # Scaled, integer-valued version of the triangular wave |x - round(x)|.
    folded = (index + half_period) % (2 * half_period)
    return tf.abs(folded - half_period)
def _reflect_index_fixer(index, size):
    """Reflect-mode index fixer, via the mirror fixer on a doubled grid."""
    doubled = _mirror_index_fixer(2 * index + 1, 2 * size + 1)
    return tf.math.floordiv(doubled - 1, 2)
def _nearest_indices_and_weights(coordinate):
    """Single `(index, weight)` pair for nearest-neighbor interpolation."""
    if coordinate.dtype.is_integer:
        rounded = coordinate
    else:
        rounded = tf.round(coordinate)
    # The nearest neighbor carries the full weight.
    return [(tf.cast(rounded, tf.int32), tf.constant(1, rounded.dtype))]
def _linear_indices_and_weights(coordinate):
    """Two `(index, weight)` pairs bracketing `coordinate` for lerp."""
    floor_coord = tf.floor(coordinate)
    frac = coordinate - floor_coord  # Weight of the upper neighbor.
    base_index = tf.cast(floor_coord, tf.int32)
    return [(base_index, 1 - frac), (base_index + 1, frac)]
def map_coordinates(
    inputs, coordinates, order, fill_mode="constant", fill_value=0.0
):
    """Sample `inputs` at fractional `coordinates` (scipy-style).

    Args:
        inputs: tensor to sample from.
        coordinates: tensor whose leading dimension indexes the axes of
            `inputs`, i.e. `coordinates.shape[0] == rank(inputs)`.
        order: interpolation order; `0` (nearest) and `1` (linear) are
            supported.
        fill_mode: one of the keys of `MAP_COORDINATES_FILL_MODES`.
        fill_value: value used outside the input for `"constant"` mode.

    Returns:
        Tensor of sampled values with shape `coordinates.shape[1:]`, cast
        back to the dtype of `inputs` (rounded first for integer dtypes).

    Raises:
        ValueError: on rank mismatch or unknown `fill_mode`.
        NotImplementedError: if `order > 1`.
    """
    input_arr = convert_to_tensor(inputs)
    coordinate_arrs = convert_to_tensor(coordinates)
    if coordinate_arrs.shape[0] != len(input_arr.shape):
        raise ValueError(
            "First dim of `coordinates` must be the same as the rank of "
            "`inputs`. "
            f"Received inputs with shape: {input_arr.shape} and coordinate "
            f"leading dim of {coordinate_arrs.shape[0]}"
        )
    if len(coordinate_arrs.shape) < 2:
        raise ValueError(
            "Invalid coordinates rank: expected at least rank 2."
            f" Received input with shape: {coordinate_arrs.shape}"
        )
    if fill_mode not in MAP_COORDINATES_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected one of "
            f"{set(MAP_COORDINATES_FILL_MODES.keys())}. Received: "
            f"fill_mode={fill_mode}"
        )
    fill_value = convert_to_tensor(fill_value, dtype=input_arr.dtype)
    coordinate_arrs = tf.unstack(coordinate_arrs, axis=0)
    if order == 0:
        interp_fun = _nearest_indices_and_weights
    elif order == 1:
        interp_fun = _linear_indices_and_weights
    else:
        raise NotImplementedError("map_coordinates currently requires order<=1")

    def process_coordinates(coords, size):
        # Returns (safe_coords, valid_mask) for one axis per `fill_mode`.
        if fill_mode == "constant":
            valid = (coords >= 0) & (coords < size)
            safe_coords = tf.clip_by_value(coords, 0, size - 1)
            return safe_coords, valid
        elif fill_mode == "nearest":
            return tf.clip_by_value(coords, 0, size - 1), tf.ones_like(
                coords, dtype=tf.bool
            )
        elif fill_mode in ["mirror", "reflect"]:
            coords = tf.abs(coords)
            size_2 = size * 2
            mod = tf.math.mod(coords, size_2)
            under = mod < size
            over = ~under
            # reflect mode is same as mirror for under
            coords = tf.where(under, mod, size_2 - mod)
            # for reflect mode, adjust the over case
            if fill_mode == "reflect":
                coords = tf.where(over, coords - 1, coords)
            return coords, tf.ones_like(coords, dtype=tf.bool)
        elif fill_mode == "wrap":
            coords = tf.math.mod(coords, size)
            return coords, tf.ones_like(coords, dtype=tf.bool)
        else:
            raise ValueError(f"Unknown fill_mode: {fill_mode}")

    valid_1d_interpolations = []
    for coordinate, size in zip(coordinate_arrs, input_arr.shape):
        interp_nodes = interp_fun(coordinate)
        valid_interp = []
        for index, weight in interp_nodes:
            safe_index, valid = process_coordinates(index, size)
            valid_interp.append((safe_index, valid, weight))
        valid_1d_interpolations.append(valid_interp)
    outputs = []
    # Accumulate the weighted contribution of every corner combination.
    for items in itertools.product(*valid_1d_interpolations):
        indices, validities, weights = zip(*items)
        indices = tf.transpose(tf.stack(indices))
        gathered = tf.transpose(tf.gather_nd(input_arr, indices))
        # Cast to computation dtype early to avoid type issues.
        # (Previously this cast was performed twice in a row; the redundant
        # second cast to the identical dtype has been removed.)
        dtype = weights[0].dtype
        gathered = tf.cast(gathered, dtype)
        if fill_mode == "constant":
            all_valid = tf.reduce_all(validities, axis=0)
            fill_value_typed = tf.cast(fill_value, dtype)
            gathered = tf.where(all_valid, gathered, fill_value_typed)
        outputs.append(functools.reduce(operator.mul, weights) * gathered)
    result = functools.reduce(operator.add, outputs)
    if input_arr.dtype.is_integer:
        result = tf.round(result)
    return tf.cast(result, input_arr.dtype)
def gaussian_blur(
    images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None
):
    """Blur image(s) with a depthwise 2D Gaussian kernel.

    Args:
        images: rank-3 (single image) or rank-4 (batch of images) tensor.
        kernel_size: `(height, width)` of the Gaussian kernel.
        sigma: `(sigma_y, sigma_x)` standard deviations of the kernel.
        data_format: `"channels_last"` or `"channels_first"`; `None` resolves
            to the backend default.

    Returns:
        The blurred images, with the same rank and data format as the input.

    Raises:
        ValueError: if `images` is not rank 3 or 4.
    """

    def _create_gaussian_kernel(kernel_size, sigma, num_channels, dtype):
        # Build a (kh, kw, num_channels, 1) depthwise kernel as the outer
        # product of two normalized 1D Gaussians.
        def _get_gaussian_kernel1d(size, sigma):
            x = tf.range(size, dtype=dtype) - (size - 1) / 2
            kernel1d = tf.exp(-0.5 * (x / sigma) ** 2)
            return kernel1d / tf.reduce_sum(kernel1d)

        def _get_gaussian_kernel2d(size, sigma):
            size = tf.cast(size, dtype)
            kernel1d_x = _get_gaussian_kernel1d(size[0], sigma[0])
            kernel1d_y = _get_gaussian_kernel1d(size[1], sigma[1])
            return tf.tensordot(kernel1d_y, kernel1d_x, axes=0)

        kernel = _get_gaussian_kernel2d(kernel_size, sigma)
        kernel = tf.reshape(kernel, (kernel_size[0], kernel_size[1], 1, 1))
        kernel = tf.tile(kernel, [1, 1, num_channels, 1])
        kernel = tf.cast(kernel, dtype)
        return kernel

    # Resolve `None` to the backend's configured default, consistent with the
    # other image ops in this module. Without this, a global "channels_first"
    # image data format would silently be treated as channels_last.
    data_format = backend.standardize_data_format(data_format)
    images = convert_to_tensor(images)
    dtype = backend.standardize_dtype(images.dtype)
    kernel_size = convert_to_tensor(kernel_size, dtype=dtype)
    sigma = convert_to_tensor(sigma, dtype=dtype)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    need_squeeze = False
    if len(images.shape) == 3:
        images = tf.expand_dims(images, axis=0)
        need_squeeze = True
    # depthwise_conv2d expects channels_last layout.
    if data_format == "channels_first":
        images = tf.transpose(images, (0, 2, 3, 1))
    num_channels = tf.shape(images)[-1]
    kernel = _create_gaussian_kernel(kernel_size, sigma, num_channels, dtype)
    blurred_images = tf.nn.depthwise_conv2d(
        images, kernel, strides=[1, 1, 1, 1], padding="SAME"
    )
    if data_format == "channels_first":
        blurred_images = tf.transpose(blurred_images, (0, 3, 1, 2))
    if need_squeeze:
        blurred_images = tf.squeeze(blurred_images, axis=0)
    return blurred_images
def elastic_transform(
    images,
    alpha=20.0,
    sigma=5.0,
    interpolation="bilinear",
    fill_mode="reflect",
    fill_value=0.0,
    seed=None,
    data_format=None,
):
    """Apply a random elastic deformation to image(s).

    Draws a random displacement field, smooths it with a Gaussian blur, and
    resamples each channel via `map_coordinates`.

    Args:
        images: rank-3 (single image) or rank-4 (batch of images) tensor.
        alpha: magnitude scale of the displacement field.
        sigma: smoothness of the displacement field (Gaussian blur stddev).
        interpolation: one of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_mode: one of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: value used for `"constant"` fill mode.
        seed: optional seed for the random displacement field.
        data_format: `"channels_last"` or `"channels_first"`; `None` resolves
            to the backend default.

    Returns:
        The deformed images, with the same rank, layout and dtype as input.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    images = convert_to_tensor(images)
    input_dtype = images.dtype
    alpha = convert_to_tensor(alpha, dtype=input_dtype)
    sigma = convert_to_tensor(sigma, dtype=input_dtype)
    kernel_factor = convert_to_tensor(sigma, dtype="int32")
    # Kernel spans roughly +/-3 sigma; `| 1` forces an odd kernel size.
    kernel_size = (6 * kernel_factor | 1, 6 * kernel_factor | 1)
    need_squeeze = False
    if len(images.shape) == 3:
        images = tf.expand_dims(images, axis=0)
        need_squeeze = True
    if data_format == "channels_last":
        batch_size, height, width, channels = images.shape
        channel_axis = -1
    else:
        batch_size, channels, height, width = images.shape
        channel_axis = 1
    seed = draw_seed(seed)
    # Static batch size may be unknown (None) for symbolic inputs.
    if batch_size is None:
        batch_size = 1
    # NOTE(review): `dx` and `dy` are drawn with the identical stateless seed
    # and identical parameters, so both fields are equal before blurring and
    # the displacement is constrained to the diagonal — confirm whether two
    # independent draws were intended.
    dx = (
        tf.random.stateless_normal(
            shape=(batch_size, height, width),
            mean=0.0,
            stddev=1.0,
            dtype=input_dtype,
            seed=seed,
        )
        * sigma
    )
    dy = (
        tf.random.stateless_normal(
            shape=(batch_size, height, width),
            mean=0.0,
            stddev=1.0,
            dtype=input_dtype,
            seed=seed,
        )
        * sigma
    )
    # Smooth the displacement fields; a channel axis is temporarily added
    # because gaussian_blur expects image-shaped inputs.
    dx = gaussian_blur(
        tf.expand_dims(dx, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    dy = gaussian_blur(
        tf.expand_dims(dy, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    dx = tf.squeeze(dx, axis=channel_axis)
    dy = tf.squeeze(dy, axis=channel_axis)
    # Base sampling grid in pixel coordinates.
    x, y = tf.meshgrid(
        tf.range(width, dtype=input_dtype),
        tf.range(height, dtype=input_dtype),
        indexing="xy",
    )
    x = tf.expand_dims(x, axis=0)
    y = tf.expand_dims(y, axis=0)
    distorted_x = x + alpha * dx
    distorted_y = y + alpha * dy
    # Resample each channel of each batch element independently.
    channel_outputs = []
    if data_format == "channels_last":
        for i in range(channels):
            channel_transformed = tf.stack(
                [
                    map_coordinates(
                        images[b, ..., i],
                        [distorted_y[b], distorted_x[b]],
                        order=AFFINE_TRANSFORM_INTERPOLATIONS.index(
                            interpolation
                        ),
                        fill_mode=fill_mode,
                        fill_value=fill_value,
                    )
                    for b in range(batch_size)
                ],
                axis=0,
            )
            channel_outputs.append(channel_transformed)
        transformed_images = tf.stack(channel_outputs, axis=-1)
    else:
        for i in range(channels):
            channel_transformed = tf.stack(
                [
                    map_coordinates(
                        images[b, i, ...],
                        [distorted_y[b], distorted_x[b]],
                        order=AFFINE_TRANSFORM_INTERPOLATIONS.index(
                            interpolation
                        ),
                        fill_mode=fill_mode,
                        fill_value=fill_value,
                    )
                    for b in range(batch_size)
                ],
                axis=0,
            )
            channel_outputs.append(channel_transformed)
        transformed_images = tf.stack(channel_outputs, axis=1)
    if need_squeeze:
        transformed_images = tf.squeeze(transformed_images, axis=0)
    transformed_images = tf.cast(transformed_images, input_dtype)
    return transformed_images
def _fill_triangle_kernel(x):
    """Triangle (tent) resize kernel: `max(0, 1 - |x|)`."""
    zero = tf.constant(0, dtype=x.dtype)
    return tf.maximum(zero, 1 - tf.abs(x))
def _fill_keys_cubic_kernel(x):
    # Keys cubic resize kernel, piecewise in x:
    #   x < 1:       1.5*x^3 - 2.5*x^2 + 1
    #   1 <= x < 2: -0.5*x^3 + 2.5*x^2 - 4*x + 2
    #   x >= 2:      0
    # Assumes `x` is a non-negative distance (a magnitude) — TODO confirm at
    # the call site.
    out = ((1.5 * x - 2.5) * x) * x + 1.0
    out = tf.where(x >= 1.0, ((-0.5 * x + 2.5) * x - 4.0) * x + 2.0, out)
    return tf.where(x >= 2.0, 0.0, out)
def _fill_lanczos_kernel(radius, x):
    # Lanczos resize kernel: sinc(x) * sinc(x / radius) for x <= radius,
    # 0 beyond the radius. The inner tf.where guards the division against
    # x == 0; values x <= 1e-3 take the analytic limit of 1.
    y = radius * tf.sin(np.pi * x) * tf.sin(np.pi * x / radius)
    out = tf.where(
        x > 1e-3, tf.divide(y, tf.where(x != 0, np.pi**2 * x**2, 1)), 1
    )
    return tf.where(x > radius, 0.0, out)
# Registry of resize kernels keyed by interpolation method name.
_kernels = {
    "linear": _fill_triangle_kernel,
    "cubic": _fill_keys_cubic_kernel,
    "lanczos3": lambda x: _fill_lanczos_kernel(3.0, x),
    "lanczos5": lambda x: _fill_lanczos_kernel(5.0, x),
}
def _compute_weight_mat(
input_size, output_size, scale, translation, kernel, antialias
):
dtype = backend.result_type(scale.dtype, translation.dtype)
inv_scale = 1.0 / scale
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/layer.py | keras/src/backend/tensorflow/layer.py | import tensorflow as tf
from keras.src import tree
from keras.src.backend.tensorflow.trackable import KerasAutoTrackable
from keras.src.utils import tf_utils
from keras.src.utils import tracking
class TFLayer(KerasAutoTrackable):
    """TensorFlow-specific layer mixin providing SavedModel export support."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): super().__init__ is not called here; presumably the
        # cooperative MRO of the concrete layer class handles base
        # initialization — confirm.
        # Export-related attributes
        self._saved_model_inputs_spec = None
        self._saved_model_arg_spec = None
        self._tracked = []

    def _set_save_spec(self, inputs, args=None, kwargs=None):
        """Defines the save spec so that serialization can trace layer calls.
        The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
        saved into a tuple of `([inputs] + args, kwargs)`.
        Args:
            inputs: possibly nested inputs passed into the call function.
            args: a list of positional arguments passed into call.
            kwargs: a dictionary of keyword arguments passed into call.
        """
        if self._saved_model_inputs_spec is not None:
            return  # Already set.
        inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs)
        args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or [])
        kwargs_spec = {}
        # Filter out non-tensor arguments from kwargs.
        for key, kwarg in kwargs.items():
            flat_kwarg = tree.flatten(kwarg)
            flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
            if any(s is None for s in flat_specs):
                continue
            kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs)
        self._saved_model_inputs_spec = inputs_spec
        self._saved_model_arg_spec = (
            [inputs_spec] + list(args_spec),
            kwargs_spec,
        )

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _trackable_children(self, save_type="checkpoint", **kwargs):
        if save_type == "savedmodel":
            # SavedModel needs to ignore the execution functions.
            train_function = getattr(self, "train_function", None)
            test_function = getattr(self, "test_function", None)
            predict_function = getattr(self, "predict_function", None)
            self.train_function = None
            self.test_function = None
            self.predict_function = None
        children = super()._trackable_children(save_type, **kwargs)
        if save_type == "savedmodel":
            # Restore the execution functions that were hidden above.
            self.train_function = train_function
            self.test_function = test_function
            self.predict_function = predict_function
            # Convert Keras tracked collections to plain Python structures
            # without creating TensorFlow trackable dependencies
            self._convert_tracked_collections(children)
        return children

    def _convert_tracked_collections(self, children):
        """Convert TrackedList/Dict/Set to plain Python structures."""
        for tracked_attr in self._tracked:
            tracked_item = getattr(self, tracked_attr)
            if isinstance(tracked_item, tracking.TrackedList):
                children[tracked_attr] = list(tracked_item)
            if isinstance(tracked_item, tracking.TrackedDict):
                children[tracked_attr] = dict(tracked_item)
            if isinstance(tracked_item, tracking.TrackedSet):
                children[tracked_attr] = list(tracked_item)

    def _get_save_spec(self, dynamic_batch=True):
        """Compatibility shim for TensorFlow saving utilities.
        TensorFlow's SavedModel / TFLite export paths (e.g.,
        tf.lite.TFLiteConverter.from_keras_model) expect a `_get_save_spec`
        method on models. This method generates TensorSpec objects
        describing the model's input signature.
        Args:
            dynamic_batch: whether to set the batch dimension to `None`.
        Returns:
            A TensorSpec, list or dict mirroring the model inputs, or
            `None` when specs cannot be inferred.
        """
        # Lazy import to avoid circular dependency
        from keras.src.export.export_utils import make_tf_tensor_spec

        # Fall back to building specs from `self.inputs`
        inputs = getattr(self, "inputs", None)
        if inputs is None:
            return None
        return tree.map_structure(
            lambda x: make_tf_tensor_spec(x, dynamic_batch=dynamic_batch),
            inputs,
        )

    @property
    def _default_save_signature(self):
        """For SavedModel support: returns the default serving signature."""
        from keras.src.models.functional import Functional
        from keras.src.models.model import Model
        from keras.src.models.sequential import Sequential

        if not isinstance(self, Model):
            return None
        inputs = None
        if (
            isinstance(self, Sequential)
            and getattr(self, "_functional", None) is not None
        ):
            inputs = self._functional.input
        elif isinstance(self, Functional):
            inputs = self.input
        if inputs is not None:
            input_signature = (
                tree.map_structure(
                    lambda x: tf.TensorSpec(x.shape, x.dtype), inputs
                ),
            )
        else:
            # No symbolic inputs available; derive the signature from the
            # shapes the model was built with.
            input_signature = tuple(
                tree.map_shape_structure(
                    lambda s: tf.TensorSpec(s, self.input_dtype), value
                )
                for value in self._build_shapes_dict.values()
            )

        @tf.function(input_signature=input_signature)
        def serving_default(inputs):
            return self(inputs)

        return serving_default
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/export.py | keras/src/backend/tensorflow/export.py | import tensorflow as tf
class TFExportArchive:
    """TensorFlow-backend hooks used by the export archive machinery."""

    def _track_layer(self, layer):
        # Variables in the lists below are actually part of the trackables
        # that get saved, because the lists are created in __init__.
        variables = layer.variables
        trainable_variables = layer.trainable_variables
        non_trainable_variables = layer.non_trainable_variables
        self._tf_trackable.variables += variables
        self._tf_trackable.trainable_variables += trainable_variables
        self._tf_trackable.non_trainable_variables += non_trainable_variables

    def add_endpoint(self, name, fn, input_signature=None, **kwargs):
        # `name` and extra kwargs are accepted for cross-backend interface
        # compatibility; only the traced tf.function is returned here.
        decorated_fn = tf.function(
            fn, input_signature=input_signature, autograph=False
        )
        return decorated_fn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/distribution_lib.py | keras/src/backend/tensorflow/distribution_lib.py | """!!!DO NOT USE!!!
Distribution related class for Tensorflow backend.
This is just a prototype and we might want to unify it
with other backends in the future.
"""
import tensorflow as tf
from tensorflow.experimental import dtensor
def list_devices(device_type=None):
    """Return the available devices, optionally filtered by type.

    Note that this should return the global devices in a distributed setting.

    Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Default to `gpu`
            or `tpu` if available when device_type is not provided. Otherwise
            will return the `cpu` devices.

    Return:
        List of devices that are available for distribute computation.
    """
    requested_type = device_type.upper() if device_type else None
    # DTensor doesn't support getting global devices, even when knowing the
    # Mesh. Use TF API instead to get global devices. Coordinator service is
    # enabled by default with DTensor, so that list_logical_devices() returns
    # a list of global devices. More context can be found in b/254911601.
    devices = tf.config.list_logical_devices(device_type=requested_type)
    if requested_type is None:
        # Prefer accelerators; fall back to CPUs when none are present.
        accelerators = [
            d for d in devices if d.device_type.lower() != "cpu"
        ]
        cpus = [d for d in devices if d.device_type.lower() == "cpu"]
        devices = accelerators if len(accelerators) > 0 else cpus
    return [
        f"{d.device_type.lower()}:{d.name.split(':')[-1]}" for d in devices
    ]
def distribute_value(value, tensor_layout):
    """Distribute `value` according to `tensor_layout` (not implemented)."""
    # TODO: implement value distribution for the TF/DTensor backend; until
    # then this is a no-op returning None.
    pass
def _to_backend_mesh(device_mesh):
    """Convert a Keras DeviceMesh to a TensorFlow DTensor mesh.

    Args:
        device_mesh: the `DeviceMesh` instance to convert.

    Returns:
        A `tf.dtensor.Mesh` instance.
    """
    # Pair each mesh axis name with its corresponding dimension size.
    dims = [
        (axis_name, dim_size)
        for axis_name, dim_size in zip(device_mesh.axis_names, device_mesh.shape)
    ]
    return dtensor.create_distributed_mesh(
        mesh_dims=dims, local_devices=device_mesh.devices.flatten()
    )
def _to_backend_layout(tensor_layout):
    """Convert a Keras TensorLayout to a DTensor layout.

    Args:
        tensor_layout: the `TensorLayout` instance to convert.

    Returns:
        A `tf.dtensor.Layout` instance.
    """
    if tensor_layout.device_mesh is None:
        raise ValueError(
            "Cannot create sharding when device mesh is not set for "
            "TensorLayout."
        )
    # Unsharded axes are represented by None/empty in Keras and by
    # dtensor.UNSHARDED in DTensor.
    specs = []
    for axis in tensor_layout.axes:
        specs.append(axis if axis else dtensor.UNSHARDED)
    return dtensor.Layout(
        sharding_specs=specs,
        mesh=tensor_layout.device_mesh.backend_mesh,
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/distribute_test.py | keras/src/backend/tensorflow/distribute_test.py | """Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
import keras
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
    backend.backend() != "tensorflow",
    reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
    """Keras + `tf.distribute.MirroredStrategy` integration tests.

    Each test runs against the two virtual CPU devices created in `setUp`.
    """

    def setUp(self):
        super().setUp()
        # Need at least 2 devices for distribution related tests.
        cpus = tf.config.list_physical_devices("CPU")
        # Reset the TF context so the logical-device split below takes effect
        # even if devices were already initialized.
        context._reset_context()
        tf.config.set_logical_device_configuration(
            cpus[0],
            [
                tf.config.LogicalDeviceConfiguration(),
                tf.config.LogicalDeviceConfiguration(),
            ],
        )

    def test_variable_creation(self):
        # Variables built under a strategy scope should wrap mirrored values.
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            dense = layers.Dense(2)
            dense.build([4, 2])
        self.assertIsInstance(dense.kernel, backend.Variable)
        self.assertIsInstance(
            dense.kernel.value, tf.distribute.DistributedValues
        )
        self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
        self.assertIsInstance(dense.kernel, backend.Variable)
        self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
        self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)

    def test_strategy_run(self):
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            inputs = layers.Input(shape=[4])
            dense = layers.Dense(2)
            output = dense(inputs)
            model = models.Functional(inputs, output)
        self.assertIsInstance(dense.kernel, backend.Variable)
        self.assertIsInstance(
            dense.kernel.value, tf.distribute.DistributedValues
        )

        # Replica 0 receives zeros, replica 1 receives ones.
        def input_fn(ctx):
            if ctx.replica_id_in_sync_group == 1:
                return tf.ones([8, 4])
            else:
                return tf.zeros([8, 4])

        distributed_inputs = (
            strategy.experimental_distribute_values_from_function(input_fn)
        )

        @tf.function
        def run_fn(data):
            return model(data)

        result = strategy.run(run_fn, args=(distributed_inputs,))
        self.assertIsInstance(
            result, tf.types.experimental.distributed.PerReplica
        )
        self.assertLen(result.values, 2)
        self.assertEqual(result.values[0].shape, [8, 2])
        self.assertEqual(result.values[1].shape, [8, 2])
        self.assertNotAllClose(result.values[0], result.values[1])
        self.assertAllClose(result.values[0], tf.zeros([8, 2]))

    def test_epoch_iterator(self):
        x = np.random.random((100, 16))
        y = np.random.random((100, 4))
        sample_weight = np.random.random((100,))
        batch_size = 16
        shuffle = True
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        epoch_iterator = tf_trainer.TFEpochIterator(
            x=x,
            y=y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            shuffle=shuffle,
            distribute_strategy=strategy,
        )
        steps_seen = []
        for step, _, data_iterator in epoch_iterator:
            steps_seen.append(step)
            batch = next(data_iterator)
            self.assertEqual(len(batch), 3)
            x, y, sample_weight = batch
            self.assertTrue(
                isinstance(x, tf.types.experimental.distributed.PerReplica)
            )
            # Make sure the local batch size is 8
            if step < 6:
                self.assertEqual(x.values[0].shape, [8, 16])
                self.assertEqual(y.values[0].shape, [8, 4])
                self.assertEqual(sample_weight.values[0].shape, [8])
            else:
                # Last partial batch
                self.assertEqual(x.values[0].shape, [2, 16])
                self.assertEqual(y.values[0].shape, [2, 4])
                self.assertEqual(sample_weight.values[0].shape, [2])
        self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])

    def test_variable_aggregation(self):
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            x = np.random.random((4, 4))
            v1 = backend.Variable(x, dtype="float32")
            self.assertEqual(v1.aggregation, "none")
            self.assertEqual(v1.value.aggregation, tf.VariableAggregation.NONE)
            v2 = backend.Variable(x, dtype="float32", aggregation="sum")
            self.assertEqual(v2.aggregation, "sum")
            self.assertEqual(v2.value.aggregation, tf.VariableAggregation.SUM)

    def test_variable_synchronization(self):
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            x = np.random.random((4, 4))
            v1 = backend.Variable(x, dtype="float32")
            self.assertEqual(v1.synchronization, "auto")
            # AUTO with MirroredStrategy defaults to ON_WRITE
            self.assertEqual(
                v1.value.synchronization, tf.VariableSynchronization.ON_WRITE
            )
            v2 = backend.Variable(x, dtype="float32", synchronization="on_read")
            self.assertEqual(v2.synchronization, "on_read")
            self.assertEqual(
                v2.value.synchronization, tf.VariableSynchronization.ON_READ
            )

    def test_seed_generator(self):
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        with strategy.scope():
            seed_generator = keras.random.SeedGenerator(42)
            states = strategy.run(lambda: seed_generator.state.value).values
            # Every replica should start from the same seed state.
            for s in states:
                self.assertAllClose(keras.ops.convert_to_numpy(s), (42, 0))

    def test_correctness_with_fit_and_regularizer(self):
        strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
        batch_size = 12
        x = keras.ops.ones((batch_size, 1))
        y = keras.ops.zeros((batch_size, 1))
        # Runs without a strategy to get expected weights.
        inputs = layers.Input(shape=(1,))
        layer = layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=keras.initializers.Constant(1),
            kernel_regularizer=keras.regularizers.L1L2(l1=0.01, l2=0.01),
        )
        model = models.Model(inputs, layer(inputs))
        model.compile(loss="mse", optimizer="sgd")
        history = model.fit(x, y, batch_size=batch_size, epochs=1)
        expected_loss = history.history["loss"]
        expected_weights = keras.ops.convert_to_numpy(layer.kernel)
        # Runs with a mirrored strategy.
        with strategy.scope():
            inputs = layers.Input(shape=(1,))
            layer = layers.Dense(
                1,
                use_bias=False,
                kernel_initializer=keras.initializers.Constant(1),
                kernel_regularizer=keras.regularizers.L1L2(l1=0.01, l2=0.01),
            )
            model = models.Model(inputs, layer(inputs))
            model.compile(loss="mse", optimizer="sgd")
            history = model.fit(x, y, batch_size=batch_size, epochs=1)
            weights = strategy.run(lambda: layer.kernel.value).values
            # Distributed training must match the single-device baseline on
            # every replica.
            self.assertAllClose(history.history["loss"], expected_loss)
            for w in weights:
                self.assertAllClose(
                    keras.ops.convert_to_numpy(w), expected_weights
                )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/optimizer.py | keras/src/backend/tensorflow/optimizer.py | """A class for Tensorflow specific optimizer logic.
The major behavior change for this class is for tf.distribute.
It will override methods from base Keras core Optimizer,
which provide distribute specific functionality, e.g. variable
creation, loss reduction, etc.
"""
import warnings
import tensorflow as tf
from keras.src import backend
from keras.src.backend.tensorflow.trackable import KerasAutoTrackable
from keras.src.optimizers import base_optimizer
class TFOptimizer(KerasAutoTrackable, base_optimizer.BaseOptimizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Capture the strategy active at construction time so later variable
        # creation and updates run under the same distribution scope.
        self._distribution_strategy = tf.distribute.get_strategy()
def add_variable_from_reference(
self, reference_variable, name=None, initializer="zeros"
):
if isinstance(reference_variable, backend.Variable):
colocate_var = reference_variable.value
else:
colocate_var = reference_variable
with self._distribution_strategy.extended.colocate_vars_with(
colocate_var
):
return super().add_variable_from_reference(
reference_variable, name=name, initializer=initializer
)
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
# This is mainly due to the interaction with tf.distribute.Strategy,
# which requires tf.Variable as the inputs for most of its APIs.
raise ValueError(
"stateless_apply is not supported with the TensorFlow backend "
"(as it is incompatible with tf.distribute)."
)
def assign(self, variable, value):
if isinstance(variable, backend.Variable):
variable = variable.value
value = tf.cast(value, variable.dtype)
if isinstance(value, tf.IndexedSlices):
variable.scatter_update(value)
else:
variable.assign(value)
def assign_add(self, variable, value):
if isinstance(variable, backend.Variable):
variable = variable.value
value = tf.cast(value, variable.dtype)
if isinstance(value, tf.IndexedSlices):
variable.scatter_add(value)
else:
variable.assign_add(value)
def assign_sub(self, variable, value):
if isinstance(variable, backend.Variable):
variable = variable.value
value = tf.cast(value, variable.dtype)
if isinstance(value, tf.IndexedSlices):
variable.scatter_sub(value)
else:
variable.assign_sub(value)
def _var_key(self, variable):
if isinstance(variable, backend.Variable):
variable = variable.value # Convert to tf.Variable
if hasattr(variable, "_distributed_container"):
variable = variable._distributed_container()
elif (
isinstance(variable, tf.__internal__.CompositeTensor)
and hasattr(variable, "handle")
and hasattr(variable.handle, "_distributed_container")
):
# For ResourceVariables, the _distributed_container attribute
# is added to their handle tensors.
variable = variable.handle._distributed_container()
return variable._unique_id
def _apply_weight_decay(self, variables):
if self.weight_decay is None:
return
def distributed_apply_weight_decay(distribution, variables, **kwargs):
def weight_decay_fn(variable):
if self._use_weight_decay(variable):
lr = tf.cast(self.learning_rate, variable.dtype)
wd = tf.cast(self.weight_decay, variable.dtype)
variable.assign_sub(variable * wd * lr)
for variable in variables:
if isinstance(variable, backend.Variable):
variable = variable.value # Convert to tf.Variable
distribution.extended.update(
variable, weight_decay_fn, group=False
)
tf.__internal__.distribute.interim.maybe_merge_call(
distributed_apply_weight_decay,
self._distribution_strategy,
variables,
)
def _backend_update_step(self, grads, trainable_variables, learning_rate):
trainable_variables = [
v.value if isinstance(v, backend.Variable) else v
for v in trainable_variables
]
grads_and_vars = list(zip(grads, trainable_variables))
grads_and_vars = self._all_reduce_sum_gradients(grads_and_vars)
tf.__internal__.distribute.interim.maybe_merge_call(
self._distributed_tf_update_step,
self._distribution_strategy,
grads_and_vars,
learning_rate,
)
def _distributed_tf_update_step(
self, distribution, grads_and_vars, learning_rate
):
def apply_grad_to_update_var(var, grad, learning_rate):
return self.update_step(grad, var, learning_rate)
for grad, var in grads_and_vars:
distribution.extended.update(
var,
apply_grad_to_update_var,
args=(grad, learning_rate),
group=False,
)
def _all_reduce_sum_gradients(self, grads_and_vars):
"""Returns all-reduced gradients aggregated via summation.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
List of (gradient, variable) pairs
where gradients have been all-reduced.
"""
replica_context = tf.distribute.get_replica_context()
if not replica_context:
return grads_and_vars
grads_and_vars = list(grads_and_vars)
filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
if filtered_grads_and_vars:
grads = [pair[0] for pair in filtered_grads_and_vars]
reduced = tf.distribute.get_replica_context().all_reduce(
tf.distribute.ReduceOp.SUM, grads
)
else:
reduced = []
# Copy 'reduced' but add None gradients back in
reduced_with_nones = []
reduced_pos = 0
for g, v in grads_and_vars:
if g is None:
reduced_with_nones.append((None, v))
else:
reduced_with_nones.append((reduced[reduced_pos], v))
reduced_pos += 1
assert reduced_pos == len(reduced), "Failed to add all gradients"
return reduced_with_nones
def _overwrite_model_variables_with_average_value(
self, trainable_variables
):
"""Overwrite model variables with their moving average values.
This function overwrites variables on each device.
Args:
var_list: list of model variables.
"""
trainable_variables = [
v.value if isinstance(v, backend.Variable) else v
for v in trainable_variables
]
# Override model variable by the stored average value on all devices.
for var, average_var in zip(
trainable_variables, self._model_variables_moving_average
):
self._distribution_strategy.extended.update(
var, lambda a, b: a.assign(b), args=(average_var,)
)
def _backend_increment_gradient_accumulators(self, grads, acc_grads):
def update_accumulator(var, grad):
var.assign(var + grad)
accumulators = [v.value for v in acc_grads]
def _distributed_tf_increment_grad_acc(
distribution, grads, accumulators
):
for grad, var in zip(grads, accumulators):
distribution.extended.update(
var, update_accumulator, args=(grad,), group=False
)
tf.__internal__.distribute.interim.maybe_merge_call(
_distributed_tf_increment_grad_acc,
self._distribution_strategy,
grads,
accumulators,
)
def _clip_by_norm(self, values, axes=None):
# We need to use TF-specific OP to support the case,
# when `values` are `tf.IndexedSlices`.
return tf.clip_by_norm(values, self.clipnorm, axes)
def filter_empty_gradients(grads_and_vars):
    """Filter out `(grad, var)` pairs that have a gradient equal to `None`.

    Args:
        grads_and_vars: Iterable of `(gradient, variable)` pairs.

    Returns:
        A tuple of the `(gradient, variable)` pairs whose gradient is not
        `None`. A warning is issued for every variable that was dropped.

    Raises:
        ValueError: If `grads_and_vars` is non-empty but every gradient
            in it is `None`.
    """
    grads_and_vars = tuple(grads_and_vars)
    if not grads_and_vars:
        return grads_and_vars
    filtered = []
    vars_with_empty_grads = []
    for grad, var in grads_and_vars:
        if grad is None:
            vars_with_empty_grads.append(var)
        else:
            filtered.append((grad, var))
    filtered = tuple(filtered)
    if not filtered:
        variable = ([v.name for _, v in grads_and_vars],)
        raise ValueError(
            f"No gradients provided for any variable: {variable}. "
            f"Provided `grads_and_vars` is {grads_and_vars}."
        )
    if vars_with_empty_grads:
        # Bug fix: the variable-name list used to be passed as the second
        # positional argument of `warnings.warn` (its `category`
        # parameter), which raised `TypeError: category must be a Warning
        # subclass` instead of emitting the warning. Interpolate the names
        # into the message instead.
        warnings.warn(
            "Gradients do not exist for variables %s when minimizing the "
            "loss. If you're using `model.compile()`, did you forget to "
            "provide a `loss` argument?"
            % ([v.name for v in vars_with_empty_grads],)
        )
    return filtered
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/rnn.py | keras/src/backend/tensorflow/rnn.py | import tensorflow as tf
from keras.src import tree
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
    """Iterates over the time dimension of a tensor.

    Args:
        step_function: RNN step function.
            Args;
                `input`; Tensor with shape `(samples, ...)` (no time dimension),
                    representing input for the batch of samples at a certain
                    time step.
                `states`; List of tensors.
            Returns;
                `output`; Tensor with shape `(samples, output_dim)`
                    (no time dimension).
                `new_states`; List of tensors, same length and shapes
                    as 'states'. The first state in the list must be the
                    output tensor at the previous timestep.
        inputs: Tensor of temporal data of shape `(samples, time, ...)`
            (at least 3D), or nested tensors, and each of which has shape
            `(samples, time, ...)`.
        initial_states: Tensor with shape `(samples, state_size)`
            (no time dimension), containing the initial values for the states
            used in the step function. In the case that state_size is in a
            nested shape, the shape of initial_states will also follow the
            nested structure.
        go_backwards: Boolean. If `True`, do the iteration over the time
            dimension in reverse order and return the reversed sequence.
        mask: Binary tensor with shape `(samples, time, 1)`,
            with a zero for every element that is masked.
        constants: List of constant values passed at each step.
        unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
        input_length: An integer or a 1-D Tensor, depending on whether
            the time dimension is fixed-length or not. In case of variable
            length input, it is used for masking in case there's no mask
            specified.
        time_major: Boolean. If `True`, the inputs and outputs will be in shape
            `(timesteps, batch, ...)`, whereas in the False case, it will be
            `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
            efficient because it avoids transposes at the beginning and end of
            the RNN calculation. However, most TensorFlow data is batch-major,
            so by default this function accepts input and emits output in
            batch-major form.
        zero_output_for_mask: Boolean. If `True`, the output for masked timestep
            will be zeros, whereas in the `False` case, output from previous
            timestep is returned.
        return_all_outputs: Boolean. If `True`, return the recurrent outputs for
            all timesteps in the sequence. If `False`, only return the output
            for the last timestep (which consumes less memory).

    Returns:
        A tuple, `(last_output, outputs, new_states)`.
            - `last_output`: the latest output of the rnn,
                with shape `(samples, ...)`.
            - `outputs`:
                - If `return_all_outputs=True`: a tensor with shape
                    `(samples, time, ...)` where each entry `outputs[s, t]` is the
                    output of the step function at time `t` for sample `s`
                - Else, a tensor equal to `last_output` with shape
                    `(samples, 1, ...)`
            - `new_states`: list of tensors, latest states returned by
                the step function, of shape `(samples, ...)`.
    """
    # Fall back to the static time dimension if no explicit length given.
    input_length = input_length or inputs.shape[1]

    def swap_batch_timestep(input_t):
        # Swap the batch and timestep dim for the incoming tensor.
        axes = list(range(len(input_t.shape)))
        axes[0], axes[1] = 1, 0
        return tf.transpose(input_t, axes)

    if not time_major:
        # All of the scanning below runs time-major; transpose on entry
        # (and back again on exit at the bottom of this function).
        inputs = tree.map_structure(swap_batch_timestep, inputs)

    flattened_inputs = tree.flatten(inputs)
    time_steps = flattened_inputs[0].shape[0]
    time_steps_t = (
        tf.shape(flattened_inputs[0])[0] if time_steps is None else time_steps
    )
    for input_ in flattened_inputs:
        input_.shape.with_rank_at_least(3)
    if mask is not None:
        if mask.dtype != tf.bool:
            mask = tf.cast(mask, tf.bool)
        if len(mask.shape) == 2:
            mask = tf.expand_dims(mask, axis=-1)
        if not time_major:
            mask = swap_batch_timestep(mask)
    if constants is None:
        constants = []

    # tf.where needs its condition tensor to be the same shape as its two
    # result tensors, but in our case the condition (mask) tensor is
    # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
    # So we need to broadcast the mask to match the shape of inputs.
    # That's what the tile call does, it just repeats the mask along its
    # second dimension n times.
    def _expand_mask(mask_t, input_t, fixed_dim=1):
        if tree.is_nested(mask_t):
            raise ValueError(
                f"mask_t is expected to be tensor, but got {mask_t}"
            )
        if tree.is_nested(input_t):
            raise ValueError(
                f"input_t is expected to be tensor, but got {input_t}"
            )
        rank_diff = len(input_t.shape) - len(mask_t.shape)
        for _ in range(rank_diff):
            mask_t = tf.expand_dims(mask_t, -1)
        multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
        return tf.tile(mask_t, multiples)

    if unroll:
        # Unrolled path: a Python-level loop over a statically known
        # number of timesteps.
        if not time_steps:
            raise ValueError("Unrolling requires a fixed number of timesteps.")
        states = tuple(initial_states)
        successive_states = []
        successive_outputs = []

        # Process the input tensors. The input tensor need to be split on the
        # time_step dim, and reverse if go_backwards is True. In the case of
        # nested input, the input is flattened and then transformed
        # individually. The result of this will be a tuple of lists, each of
        # the item in tuple is list of the tensor with shape (batch, feature)
        def _process_single_input_t(input_t):
            input_t = tf.unstack(input_t)  # unstack for time_step dim
            if go_backwards:
                input_t.reverse()
            return input_t

        if tree.is_nested(inputs):
            processed_input = tree.map_structure(
                _process_single_input_t, inputs
            )
        else:
            processed_input = (_process_single_input_t(inputs),)

        def _get_input_tensor(time):
            inp = [t_[time] for t_ in processed_input]
            return tree.pack_sequence_as(inputs, inp)

        if mask is not None:
            mask_list = tf.unstack(mask)
            if go_backwards:
                mask_list.reverse()
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                output, new_states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                tiled_mask_t = _expand_mask(mask_t, output)
                if not successive_outputs:
                    prev_output = tf.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]
                # Masked timesteps keep the previous (or zero) output.
                output = tf.where(tiled_mask_t, output, prev_output)
                flat_states = tree.flatten(states)
                flat_new_states = tree.flatten(new_states)
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, s) for s in flat_states
                )
                flat_final_states = tuple(
                    tf.where(m, s, ps)
                    for m, s, ps in zip(
                        tiled_mask_t, flat_new_states, flat_states
                    )
                )
                states = tree.pack_sequence_as(states, flat_final_states)
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = tf.stack(successive_outputs)
            if zero_output_for_mask:
                last_output = tf.where(
                    _expand_mask(mask_list[-1], last_output),
                    last_output,
                    tf.zeros_like(last_output),
                )
                outputs = tf.where(
                    _expand_mask(mask, outputs, fixed_dim=2),
                    outputs,
                    tf.zeros_like(outputs),
                )
        else:  # mask is None
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                output, states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = tf.stack(successive_outputs)
    else:  # Unroll == False
        states = tuple(initial_states)
        # Create input tensor array, if the inputs is nested tensors, then it
        # will be flattened first, and tensor array will be created one per
        # flattened tensor.
        input_ta = tuple(
            tf.TensorArray(
                dtype=inp.dtype,
                size=time_steps_t,
                tensor_array_name=f"input_ta_{i}",
            )
            for i, inp in enumerate(flattened_inputs)
        )
        input_ta = tuple(
            (
                ta.unstack(input_)
                if not go_backwards
                else ta.unstack(tf.reverse(input_, [0]))
            )
            for ta, input_ in zip(input_ta, flattened_inputs)
        )
        # Get the time(0) input and compute the output for that, the output will
        # be used to determine the dtype of output tensor array. Don't read from
        # input_ta due to TensorArray clear_after_read default to True.
        input_time_zero = tree.pack_sequence_as(
            inputs, [inp[0] for inp in flattened_inputs]
        )
        # output_time_zero is used to determine the cell output shape and its
        # dtype. the value is discarded.
        output_time_zero, _ = step_function(
            input_time_zero, tuple(initial_states) + tuple(constants)
        )
        output_ta_size = time_steps_t if return_all_outputs else 1
        output_ta = tuple(
            tf.TensorArray(
                dtype=out.dtype,
                size=output_ta_size,
                element_shape=out.shape,
                tensor_array_name=f"output_ta_{i}",
            )
            for i, out in enumerate(tree.flatten(output_time_zero))
        )
        time = tf.constant(0, dtype="int32", name="time")
        if input_length is None:
            max_iterations = time_steps_t
        else:
            max_iterations = tf.reduce_max(input_length)
        while_loop_kwargs = {
            "cond": lambda time, *_: time < time_steps_t,
            "maximum_iterations": max_iterations,
            "parallel_iterations": 32,
            "swap_memory": True,
        }
        if mask is not None:
            if go_backwards:
                mask = tf.reverse(mask, [0])
            mask_ta = tf.TensorArray(
                dtype=tf.bool, size=time_steps_t, tensor_array_name="mask_ta"
            )
            mask_ta = mask_ta.unstack(mask)

            def masking_fn(time):
                return mask_ta.read(time)

            def compute_masked_output(mask_t, flat_out, flat_mask):
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
                    for o in flat_out
                )
                return tuple(
                    tf.where(m, o, fm)
                    for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask)
                )

        elif isinstance(input_length, tf.Tensor):
            # No explicit mask, but per-sample lengths: derive the mask
            # from `time < input_length` (reversed when going backwards).
            if go_backwards:
                max_len = tf.reduce_max(input_length, axis=0)
                rev_input_length = tf.subtract(max_len - 1, input_length)

                def masking_fn(time):
                    return tf.less(rev_input_length, time)

            else:

                def masking_fn(time):
                    return tf.greater(input_length, time)

            def compute_masked_output(mask_t, flat_out, flat_mask):
                return tuple(
                    tf.where(mask_t, o, zo)
                    for (o, zo) in zip(flat_out, flat_mask)
                )

        else:
            masking_fn = None
        if masking_fn is not None:
            # Mask for the T output will be base on the output of T - 1. In the
            # case T = 0, a zero filled tensor will be used.
            flat_zero_output = tuple(
                tf.zeros_like(o) for o in tree.flatten(output_time_zero)
            )

            def _step(time, output_ta_t, prev_output, *states):
                """RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    prev_output: tuple of outputs from time - 1.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
                """
                current_input = tuple(ta.read(time) for ta in input_ta)
                # maybe set shape.
                current_input = tree.pack_sequence_as(inputs, current_input)
                mask_t = masking_fn(time)
                output, new_states = step_function(
                    current_input, tuple(states) + tuple(constants)
                )
                # mask output
                flat_output = tree.flatten(output)
                flat_mask_output = (
                    flat_zero_output
                    if zero_output_for_mask
                    else tree.flatten(prev_output)
                )
                flat_new_output = compute_masked_output(
                    mask_t, flat_output, flat_mask_output
                )
                # mask states
                flat_state = tree.flatten(states)
                flat_new_state = tree.flatten(new_states)
                flat_final_state = compute_masked_output(
                    mask_t, flat_new_state, flat_state
                )
                new_states = tree.pack_sequence_as(new_states, flat_final_state)
                ta_index_to_write = time if return_all_outputs else 0
                output_ta_t = tuple(
                    ta.write(ta_index_to_write, out)
                    for ta, out in zip(output_ta_t, flat_new_output)
                )
                return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(
                    new_states
                )

            final_outputs = tf.while_loop(
                body=_step,
                loop_vars=(time, output_ta, flat_zero_output) + states,
                **while_loop_kwargs,
            )
            # Skip final_outputs[2] which is the output for final timestep.
            new_states = final_outputs[3:]
        else:

            def _step(time, output_ta_t, *states):
                """RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
                """
                current_input = tuple(ta.read(time) for ta in input_ta)
                current_input = tree.pack_sequence_as(inputs, current_input)
                output, new_states = step_function(
                    current_input, tuple(states) + tuple(constants)
                )
                flat_new_state = tree.flatten(new_states)
                flat_output = tree.flatten(output)
                ta_index_to_write = time if return_all_outputs else 0
                output_ta_t = tuple(
                    ta.write(ta_index_to_write, out)
                    for ta, out in zip(output_ta_t, flat_output)
                )
                new_states = tree.pack_sequence_as(
                    initial_states, flat_new_state
                )
                return (time + 1, output_ta_t) + tuple(new_states)

            final_outputs = tf.while_loop(
                body=_step,
                loop_vars=(time, output_ta) + states,
                **while_loop_kwargs,
            )
            new_states = final_outputs[2:]
        output_ta = final_outputs[1]
        outputs = tuple(o.stack() for o in output_ta)
        last_output = tuple(o[-1] for o in outputs)
        outputs = tree.pack_sequence_as(output_time_zero, outputs)
        last_output = tree.pack_sequence_as(output_time_zero, last_output)
    if not time_major:
        outputs = tree.map_structure(swap_batch_timestep, outputs)
    return last_output, outputs, new_states
def gru(
    inputs,
    initial_state,
    mask,
    kernel,
    recurrent_kernel,
    bias,
    activation,
    recurrent_activation,
    return_sequences=False,
    go_backwards=False,
    unroll=False,
    time_major=False,
    reset_after=True,
):
    """Run a GRU through the fused cuDNN kernel.

    Raises `NotImplementedError` whenever cuDNN cannot handle the
    arguments or no suitable device/op is available, so the caller can
    fall back to the generic implementation.
    """
    if not cudnn_ok(
        activation,
        recurrent_activation,
        unroll,
        use_bias=bias is not None,
        reset_after=reset_after,
    ):
        raise NotImplementedError

    from keras.src.backend.tensorflow import Variable

    # The cuDNN op consumes raw tf.Variables / tensors, so unwrap any
    # Keras Variable wrappers.
    if isinstance(kernel, Variable):
        kernel = kernel.value
    if isinstance(recurrent_kernel, Variable):
        recurrent_kernel = recurrent_kernel.value
    if isinstance(bias, Variable):
        bias = bias.value

    try:
        return _cudnn_gru(
            inputs,
            initial_state,
            kernel,
            recurrent_kernel,
            bias,
            mask,
            time_major,
            go_backwards,
            return_sequences,
        )
    except (tf.errors.InvalidArgumentError, tf.errors.NotFoundError):
        # cuDNN op not found, or no device that can run it.
        raise NotImplementedError
def _do_gru_arguments_support_cudnn(
    activation,
    recurrent_activation,
    unroll,
    use_bias,
    reset_after,
):
    """Whether these GRU constructor arguments allow the cuDNN kernel."""
    from keras.src import activations
    from keras.src import ops

    tanh_like = (activations.tanh, tf.tanh, ops.tanh)
    sigmoid_like = (activations.sigmoid, tf.sigmoid, ops.sigmoid)
    supported = activation in tanh_like
    supported = supported and recurrent_activation in sigmoid_like
    # cuDNN additionally requires no unrolling, a bias, and reset_after.
    return supported and not unroll and use_bias and reset_after
def _do_lstm_arguments_support_cudnn(
    activation,
    recurrent_activation,
    unroll,
    use_bias,
):
    """Whether these LSTM constructor arguments allow the cuDNN kernel."""
    from keras.src import activations
    from keras.src import ops

    tanh_like = (activations.tanh, tf.tanh, ops.tanh)
    sigmoid_like = (activations.sigmoid, tf.sigmoid, ops.sigmoid)
    supported = activation in tanh_like
    supported = supported and recurrent_activation in sigmoid_like
    # cuDNN additionally requires no unrolling and a bias.
    return supported and not unroll and use_bias
def _has_fully_masked_sequence(mask):
    """Return a scalar bool tensor: is any sample masked at every step?

    The cuDNN kernel errors out if the input contains a sequence that is
    entirely masked, so callers reroute such inputs to the standard
    kernel. A fully masked row is all `False`; inverting the mask makes
    it all `True`, which `reduce_all` detects per row and `reduce_any`
    aggregates over the batch.
    """
    inverted = tf.logical_not(tf.cast(mask, dtype="bool"))
    row_fully_masked = tf.reduce_all(inverted, axis=1)
    return tf.reduce_any(row_fully_masked)
def _assert_valid_mask(mask):
    """In-graph assertion that `mask` is usable with the cuDNN kernels.

    The mask must be strictly right-padded and must not contain any
    fully masked sequence.
    """
    no_fully_masked_rows = tf.logical_not(_has_fully_masked_sequence(mask))
    valid = tf.logical_and(no_fully_masked_rows, _is_sequence_right_padded(mask))
    tf.Assert(
        valid,
        [
            (
                "You are passing a RNN mask that does not correspond to "
                "right-padded sequences, while using cuDNN, which is not "
                "supported. With cuDNN, RNN masks can only be used for "
                "right-padding, e.g. `[[True, True, False, False]]` would "
                "be a valid mask, but any mask that isn't just contiguous "
                "`True`'s on the left and contiguous `False`'s on the right "
                "would be invalid. You can pass `use_cudnn=False` to your "
                "RNN layer to stop using cuDNN (this may be slower)."
            )
        ],
    )
def _standardize_cudnn_weights(weights, biases, shape, transpose_weights=False):
    """Pack Keras kernel/bias tensors into a single cuDNN parameter blob.

    Keras stores kernels transposed relative to what cuDNN expects, e.g.:

    ```
    Keras                 cuDNN
    [[0, 1, 2],  <--->  [[0, 2, 4],
     [3, 4, 5]]          [1, 3, 5]]
    ```

    Pass `transpose_weights=True` to perform that conversion.

    Args:
        weights: list of kernel and recurrent-kernel weight tensors.
        biases: list of per-gate bias tensors.
        shape: the shape each piece is reshaped to before concatenation.
        transpose_weights: boolean, whether to transpose the weights.

    Returns:
        A single flat tensor usable as the `params` input of cuDNN ops.
    """
    pieces = []
    for w in weights:
        if transpose_weights:
            w = tf.transpose(w)
        pieces.append(tf.reshape(w, shape))
    for b in biases:
        pieces.append(tf.reshape(b, shape))
    return tf.concat(pieces, axis=0)
def _is_sequence_right_padded(mask):
    """Return a scalar bool tensor: is `mask` strictly right padded?

    cuDNN expresses masking via per-sample sequence lengths, which can
    only describe right padding. Left padding or gaps in the middle of a
    sequence (e.g. `[[True, False, True, False, False]]`) are therefore
    not supported.

    Args:
        mask: Boolean tensor with shape [batch, timestep].

    Returns:
        Boolean scalar tensor, whether the mask is strictly right padded.
    """
    num_steps = tf.shape(mask)[1]
    true_count_per_row = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
    # A strictly right-padded row is exactly the sequence_mask of its
    # number of True entries; compare against that canonical form.
    canonical = tf.sequence_mask(true_count_per_row, maxlen=num_steps)
    return tf.reduce_all(
        tf.equal(
            tf.cast(mask, dtype="bool"),
            tf.cast(canonical, dtype="bool"),
        )
    )
def _compute_sequence_length_from_mask(mask, time_major):
    """Reduce a boolean mask to a 1-D int32 sequence-length tensor.

    For a right-padded mask such as

        [[True, True, False, False],
         [True, True, True, False]]

    the result is `[2, 3]`: the count of unmasked steps per sample.
    The mask must already be right padded (see
    `_is_sequence_right_padded`).

    Args:
        mask: Boolean tensor with shape [batch, timestep] or
            [timestep, batch] if time_major=True.
        time_major: Boolean, whether the timestep axis comes first.

    Returns:
        sequence_length: 1D int32 tensor.
    """
    step_axis = 0 if time_major else 1
    return tf.reduce_sum(tf.cast(mask, tf.int32), axis=step_axis)
def _is_gpu_available():
    """Whether at least one logical GPU device is visible to TensorFlow."""
    return len(tf.config.list_logical_devices("GPU")) > 0
def _cudnn_gru(
    inputs,
    initial_state,
    kernel,
    recurrent_kernel,
    bias,
    mask,
    time_major,
    go_backwards,
    return_sequences,
):
    """GRU with cuDNN implementation which is only available for GPU.

    Returns `(last_output, outputs, [state])`, matching the return
    structure of the generic `rnn()`-based GRU path.
    """
    if mask is not None:
        _assert_valid_mask(mask)
        sequence_lengths = _compute_sequence_length_from_mask(mask, time_major)
    else:
        if time_major:
            batch_dim = tf.shape(inputs)[1]
            max_sequence_length = tf.shape(inputs)[0]
        else:
            batch_dim = tf.shape(inputs)[0]
            max_sequence_length = tf.shape(inputs)[1]
        # No mask: every sample uses the full sequence length.
        sequence_lengths = tf.fill([batch_dim], max_sequence_length)
    if not time_major and sequence_lengths is None:
        inputs = tf.transpose(inputs, perm=(1, 0, 2))
        seq_axis, batch_axis = (0, 1)
    else:
        seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
    # For init_h, cuDNN expects one more dim of num_layers before or after batch
    # dim for time major or batch major inputs respectively
    init_h = tf.expand_dims(initial_state, axis=seq_axis)
    weights = tf.split(kernel, 3, axis=1)
    weights += tf.split(recurrent_kernel, 3, axis=1)
    # Note that the bias was initialized as shape (2, 3 * units), flatten it to
    # (6 * units)
    bias = tf.split(tf.reshape(bias, [-1]), 6)
    if tf.sysconfig.get_build_info()["is_cuda_build"]:
        # Note that the gate order for cuDNN is different from the canonical
        # format. canonical format is [z, r, h], whereas cuDNN is [r, z, h].
        # The swap need to be done for kernel, recurrent_kernel, input_bias,
        # recurrent_bias.
        # z is update gate weights.
        # r is reset gate weights.
        # h is output gate weights.
        weights[0], weights[1] = weights[1], weights[0]
        weights[3], weights[4] = weights[4], weights[3]
        bias[0], bias[1] = bias[1], bias[0]
        bias[3], bias[4] = bias[4], bias[3]
    params = _standardize_cudnn_weights(
        weights=weights,
        biases=bias,
        shape=tf.constant([-1]),
        transpose_weights=True,
    )
    if go_backwards:
        # Three reversals are required. E.g.,
        # normal input = [1, 2, 3, 0, 0]  # where 0 need to be masked
        # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
        # output_from_cudnn = [6, 5, 4, 0, 0]
        # expected_output = [0, 0, 6, 5 ,4]
        inputs = tf.reverse_sequence(
            inputs,
            sequence_lengths,
            seq_axis=seq_axis,
            batch_axis=batch_axis,
        )
    outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(
        input=inputs,
        input_h=init_h,
        input_c=0,
        params=params,
        is_training=True,
        rnn_mode="gru",
        sequence_lengths=sequence_lengths,
        time_major=time_major,
    )
    if go_backwards:
        # Reversals two and three of the scheme described above.
        outputs = tf.reverse_sequence(
            outputs,
            sequence_lengths,
            seq_axis=seq_axis,
            batch_axis=batch_axis,
        )
        outputs = tf.reverse(outputs, axis=[seq_axis])
    last_output = outputs[-1]
    if not time_major and sequence_lengths is None and return_sequences:
        outputs = tf.transpose(outputs, perm=[1, 0, 2])
    state = tf.squeeze(h, axis=seq_axis)
    # In the case of variable length input, the cudnn kernel will fill zeros for
    # the output, whereas the default keras behavior is to bring over the
    # previous output for t-1, so that in the return_sequence=False case, user
    # can quickly get the final effect output instead just 0s at the last
    # timestep. In order to mimic the default keras behavior, we copy the final
    # h state as the last_output, since it is numerically same as the output.
    if sequence_lengths is not None:
        last_output = state
    # Match CPU return format
    if not return_sequences:
        outputs = tf.expand_dims(last_output, axis=0 if time_major else 1)
    return (
        last_output,
        outputs,
        [state],
    )
def cudnn_ok(
    activation,
    recurrent_activation,
    unroll,
    use_bias,
    reset_after=None,
):
    """Whether the fused cuDNN kernel can be used.

    `reset_after=None` selects the LSTM argument check; any other value
    selects the GRU check (which also requires `reset_after`). In both
    cases a GPU must be available as well.
    """
    checker = (
        _do_lstm_arguments_support_cudnn
        if reset_after is None
        else _do_gru_arguments_support_cudnn
    )
    kwargs = dict(
        activation=activation,
        recurrent_activation=recurrent_activation,
        unroll=unroll,
        use_bias=use_bias,
    )
    if reset_after is not None:
        kwargs["reset_after"] = reset_after
    return checker(**kwargs) and _is_gpu_available()
def lstm(
    inputs,
    initial_state_h,
    initial_state_c,
    mask,
    kernel,
    recurrent_kernel,
    bias,
    activation,
    recurrent_activation,
    return_sequences=False,
    go_backwards=False,
    unroll=False,
    time_major=False,
):
    """Run an LSTM through the fused cuDNN kernel.

    Raises `NotImplementedError` whenever cuDNN cannot handle the
    arguments or no suitable device/op is available, so the caller can
    fall back to the generic implementation.
    """
    if not cudnn_ok(
        activation, recurrent_activation, unroll, use_bias=bias is not None
    ):
        raise NotImplementedError

    from keras.src.backend.tensorflow import Variable

    # The cuDNN op consumes raw tf.Variables / tensors, so unwrap any
    # Keras Variable wrappers.
    if isinstance(kernel, Variable):
        kernel = kernel.value
    if isinstance(recurrent_kernel, Variable):
        recurrent_kernel = recurrent_kernel.value
    if isinstance(bias, Variable):
        bias = bias.value

    try:
        return _cudnn_lstm(
            inputs,
            initial_state_h,
            initial_state_c,
            kernel,
            recurrent_kernel,
            bias,
            mask,
            time_major,
            go_backwards,
            return_sequences,
        )
    except (tf.errors.InvalidArgumentError, tf.errors.NotFoundError):
        # cuDNN op not found, or no device that can run it.
        raise NotImplementedError
def _cudnn_lstm(
inputs,
initial_state_h,
initial_state_c,
kernel,
recurrent_kernel,
bias,
mask,
time_major,
go_backwards,
return_sequences,
):
if mask is not None:
_assert_valid_mask(mask)
sequence_lengths = _compute_sequence_length_from_mask(mask, time_major)
else:
if time_major:
batch_dim = tf.shape(inputs)[1]
max_sequence_length = tf.shape(inputs)[0]
else:
batch_dim = tf.shape(inputs)[0]
max_sequence_length = tf.shape(inputs)[1]
sequence_lengths = tf.fill([batch_dim], max_sequence_length)
if not time_major and sequence_lengths is None:
inputs = tf.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
# For init_h and init_c, cuDNN expects one more dim of num_layers before or
# after batch dim for time major or batch major inputs respectively
init_h = tf.expand_dims(initial_state_h, axis=seq_axis)
init_c = tf.expand_dims(initial_state_c, axis=seq_axis)
weights = tf.split(kernel, 4, axis=1)
weights += tf.split(recurrent_kernel, 4, axis=1)
# cuDNN has an extra set of bias for inputs, we disable them (setting to 0),
# so that mathematically it is same as the canonical LSTM implementation.
full_bias = tf.concat((tf.zeros_like(bias), bias), 0)
if tf.sysconfig.get_build_info()["is_rocm_build"]:
# ROCm MIOpen's weight sequence for LSTM is different from both
# canonical and Cudnn format
# MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
# i is input gate weights.
# f is forget gate weights.
# o is output gate weights.
# c is cell gate weights.
weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
# full_bias is a tensor of shape (8*n,)
full_bias = tf.split(full_bias, 8, axis=0)
full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
params = _standardize_cudnn_weights(
weights=weights,
biases=tf.split(full_bias, 8),
shape=tf.constant([-1]),
transpose_weights=True,
)
if go_backwards:
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/core.py | keras/src/backend/tensorflow/core.py | import builtins
import numpy as np
import tensorflow as tf
from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
from keras.src import tree
from keras.src.backend.common import KerasVariable
from keras.src.backend.common import global_state
from keras.src.backend.common import is_int_dtype
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.backend_utils import slice_along_axis
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.name_scope import name_scope as base_name_scope
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.tensorflow.sparse import sparse_to_dense
from keras.src.utils.naming import auto_name
SUPPORTS_SPARSE_TENSORS = True
SUPPORTS_RAGGED_TENSORS = True
# https://github.com/tensorflow/tensorflow/issues/78338
IS_THREAD_SAFE = False
class Variable(
    KerasVariable,
    tf.__internal__.types.Tensor,
    tf.__internal__.tracking.Trackable,
):
    """TensorFlow backend implementation of `KerasVariable`.

    Wraps a `tf.Variable` and integrates with TensorFlow's object tracking
    and SavedModel machinery via `Trackable`, while behaving like a native
    tensor through `__tf_tensor__`.
    """

    # Hint to TF internals to treat this wrapper like a resource variable.
    _should_act_as_resource_variable = True

    @property
    def handle(self):
        # Resource handle of the underlying tf.Variable.
        return self.value.handle

    def _initialize(self, value):
        # Adopt an existing tf.Variable as-is; otherwise wrap `value` in a
        # new tf.Variable carrying the Keras-level settings.
        if isinstance(value, tf.Variable):
            self._value = value
        else:
            self._value = tf.Variable(
                value,
                dtype=self._dtype,
                trainable=self.trainable,
                name=self.name,
                aggregation=self._map_aggregation(self.aggregation),
                synchronization=self._map_synchronization(self.synchronization),
            )

    def _initialize_with_initializer(self, initializer):
        # Pass a callable so tf.Variable can defer materialization.
        self._initialize(lambda: initializer(self._shape, dtype=self._dtype))

    def _deferred_initialize(self):
        """Materialize a variable whose initialization was deferred.

        Raises:
            ValueError: if the variable is already initialized, or if called
                inside a stateless scope (variable creation is disallowed
                there).
        """
        if self._value is not None:
            raise ValueError(f"Variable {self.path} is already initialized.")

        if in_stateless_scope():
            raise ValueError(
                "You are attempting to initialize a variable "
                "while in a stateless scope. This is disallowed. "
                "Make sure that all variables are initialized "
                "before you start using your layer/model objects."
            )
        # `init_scope` lifts creation out of any surrounding tf.function.
        with tf.init_scope():
            self._initialize_with_initializer(self._initializer)
            self._initializer = None

    def _direct_assign(self, value):
        self._value.assign(tf.cast(value, self._value.dtype))

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype)

    def numpy(self):  # noqa: F811
        return self.value.numpy()

    @property
    def shape(self):
        return tf.TensorShape(super().shape)

    # Overload native accessor.
    def __tf_tensor__(self, dtype=None, name=None):
        return tf.convert_to_tensor(self.value, dtype=dtype, name=name)

    # Methods below are for SavedModel support
    @property
    def _shared_name(self):
        return self.value._shared_name

    def _serialize_to_tensors(self):
        try:
            return self.value._serialize_to_tensors()
        except NotImplementedError:
            # Fallback for tf.Variable implementations lacking this hook.
            return {"VARIABLE_VALUE": self.value}

    def _restore_from_tensors(self, restored_tensors):
        try:
            return self.value._restore_from_tensors(restored_tensors)
        except NotImplementedError:
            self.assign(restored_tensors["VARIABLE_VALUE"])
            return self.value

    def _copy_trackable_to_cpu(self, object_map):
        self.value._copy_trackable_to_cpu(object_map)
        object_map[self] = tf.Variable(object_map[self.value])

    def _export_to_saved_model_graph(
        self, object_map, tensor_map, options, **kwargs
    ):
        resource_list = self.value._export_to_saved_model_graph(
            object_map, tensor_map, options, **kwargs
        )
        object_map[self] = tf.Variable(object_map[self.value])
        return resource_list

    def _write_object_proto(self, proto, options):
        return self.value._write_object_proto(proto, options)

    def _map_aggregation(self, aggregation):
        # Translate Keras string aggregation names to TF enum values.
        mapping = {
            "none": tf.VariableAggregation.NONE,
            "sum": tf.VariableAggregation.SUM,
            "mean": tf.VariableAggregation.MEAN,
            "only_first_replica": tf.VariableAggregation.ONLY_FIRST_REPLICA,
        }
        return mapping[aggregation]

    def _map_synchronization(self, synchronization):
        # Translate Keras string synchronization names to TF enum values.
        mapping = {
            "none": tf.VariableSynchronization.NONE,
            "on_read": tf.VariableSynchronization.ON_READ,
            "on_write": tf.VariableSynchronization.ON_WRITE,
            "auto": tf.VariableSynchronization.AUTO,
        }
        return mapping[synchronization]
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
    """Convert `x` to a TF tensor, optionally densifying and casting.

    Args:
        x: Input value (Python scalar/sequence, numpy array, or TF tensor).
        dtype: Optional target dtype (Keras dtype string or TF dtype).
        sparse: If explicitly `False`, densify `tf.SparseTensor` inputs.
        ragged: If explicitly `False`, densify `tf.RaggedTensor` inputs.

    Returns:
        A TF tensor (possibly still sparse/ragged if allowed) of `dtype`.
    """
    if isinstance(x, tf.SparseTensor) and sparse is not None and not sparse:
        x = sparse_to_dense(x)
    if isinstance(x, tf.RaggedTensor) and ragged is not None and not ragged:
        x = x.to_tensor()
    if dtype is not None:
        dtype = standardize_dtype(dtype)
    if not tf.is_tensor(x):
        if dtype == "bool" or is_int_dtype(dtype):
            # TensorFlow conversion is stricter than other backends, it does not
            # allow ints for bools or floats for ints. We convert without dtype
            # and cast instead.
            x = tf.convert_to_tensor(x)
            return tf.cast(x, dtype)
        return tf.convert_to_tensor(x, dtype=dtype)
    elif dtype is not None and not standardize_dtype(x.dtype) == dtype:
        if isinstance(x, tf.SparseTensor):
            # tf.cast drops the static shape of SparseTensors; restore it.
            x_shape = x.shape
            x = tf.cast(x, dtype)
            x.set_shape(x_shape)
            return x
        return tf.cast(x, dtype=dtype)
    return x
def convert_to_numpy(x):
    """Convert a TF tensor (dense, sparse, ragged, or slices) to `np.ndarray`."""
    if isinstance(x, tf.SparseTensor):
        x = sparse_to_dense(x)
    elif isinstance(x, tf.IndexedSlices):
        x = tf.convert_to_tensor(x)
    elif isinstance(x, tf.RaggedTensor):
        # Ragged dims are padded with the default value by `to_tensor`.
        x = x.to_tensor()
    return np.array(x)
def is_tensor(x):
    """Return whether `x` is a TF-native tensor type."""
    return tf.is_tensor(x)
def shape(x):
    """Always return a tuple shape.

    `tf.shape` will return a `tf.Tensor`, which differs from the tuple return
    type on the torch and jax backends. We write our own method instead which
    always returns a tuple, with integer values when the shape is known, and
    tensor values when the shape is unknown (this is tf specific, as dynamic
    shapes do not apply in other backends).

    Args:
        x: A `KerasTensor`, TF tensor, or value convertible to a TF tensor.

    Returns:
        A tuple of ints, scalar tensors (for dynamic dims), and possibly
        `None` (for ragged dims).

    Raises:
        ValueError: if `x` has a statically unknown rank.
    """
    if isinstance(x, KerasTensor):
        return x.shape
    if not tf.is_tensor(x):
        x = tf.convert_to_tensor(x)
    if x.shape == tf.TensorShape(None):
        raise ValueError(
            "All tensors passed to `ops.shape` must have a statically known "
            f"rank. Received: x={x} with unknown rank."
        )
    shape = x.shape.as_list()
    dynamic = tf.shape(x)
    for i in range(len(shape)):
        if shape[i] is None:
            try:
                shape[i] = dynamic[i]
            except Exception:
                # With RaggedTensors, accessing a ragged dimension will fail,
                # we leave it as None. Use `except Exception` rather than a
                # bare `except:` so KeyboardInterrupt/SystemExit propagate.
                pass
    return tuple(shape)
def cast(x, dtype):
    """Cast `x` to `dtype`, preserving static shape info on sparse tensors."""
    dtype = standardize_dtype(dtype)
    if not isinstance(x, tf.SparseTensor):
        return tf.cast(x, dtype=dtype)
    # `tf.cast` loses the static shape of a SparseTensor; re-attach it.
    static_shape = x.shape
    result = tf.cast(x, dtype)
    result.set_shape(static_shape)
    return result
def compute_output_spec(fn, *args, **kwargs):
    """Trace `fn` symbolically and return its output as `KerasTensor`s.

    Runs `fn` inside a scratch `FuncGraph` with all `KerasTensor` arguments
    replaced by TF placeholders, then converts the resulting TF tensors back
    into `KerasTensor` specs. No real computation is executed.
    """
    with StatelessScope(), SymbolicScope():
        graph_name = auto_name("scratch_graph")
        with tf.__internal__.FuncGraph(graph_name).as_default():

            def convert_keras_tensor_to_tf(x):
                # Sparse KerasTensors need a sparse placeholder to keep
                # sparseness visible to `fn`.
                if isinstance(x, KerasTensor):
                    if x.sparse:
                        return tf.compat.v1.sparse_placeholder(
                            shape=x.shape, dtype=x.dtype
                        )
                    else:
                        return tf.compat.v1.placeholder(
                            shape=x.shape, dtype=x.dtype
                        )
                return x

            args, kwargs = tree.map_structure(
                convert_keras_tensor_to_tf, (args, kwargs)
            )
            tf_out = fn(*args, **kwargs)

            def convert_tf_to_keras_tensor(x):
                if tf.is_tensor(x):
                    return KerasTensor(
                        x.shape, x.dtype, sparse=isinstance(x, tf.SparseTensor)
                    )
                return x

            output_spec = tree.map_structure(convert_tf_to_keras_tensor, tf_out)
    return output_spec
def cond(pred, true_fn, false_fn):
    """Conditionally run `true_fn` or `false_fn` depending on `pred`."""
    if isinstance(pred, tf.Variable):
        return tf.cond(pred, true_fn=true_fn, false_fn=false_fn)
    # smart_cond short-circuits at trace time when `pred` is a Python bool.
    return tf.__internal__.smart_cond.smart_cond(
        pred, true_fn=true_fn, false_fn=false_fn
    )
def vectorized_map(function, elements):
    """Vectorize `function` over the leading axis of `elements`."""
    return tf.vectorized_map(function, elements)
def map(f, xs):
    """Apply `f` to each element along the leading axis of `xs`.

    `xs` may be a single tensor or a nested structure of tensors; `f` is
    traced once on the first element to infer the output signature.
    """
    xs = tree.map_structure(convert_to_tensor, xs)

    def _infer_signature(sample):
        # Call `f` on a single element and describe its outputs as specs.
        return tree.map_structure(tf.TensorSpec.from_tensor, f(sample))

    if tree.is_nested(xs):
        sample = tree.pack_sequence_as(xs, [x[0] for x in tree.flatten(xs)])
    else:
        sample = xs[0]
    return tf.map_fn(f, xs, fn_output_signature=_infer_signature(sample))
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over the leading axis of `xs`, threading a carry.

    Reimplements `jax.lax.scan` semantics on top of `tf.while_loop` and
    `TensorArray`s: `f(carry, x) -> (carry, y)`, returning the final carry
    and the stacked ys. Note: `f`'s outputs must match the shape/dtype of
    `init` (for the carry).
    """
    # We have reimplemented `scan` to match the behavior of `jax.lax.scan`
    # Ref: tf.scan, jax.lax.scan
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            raise ValueError(
                "`unroll` must be an positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    if xs is None and length is None:
        raise ValueError("Got no `xs` to scan over and `length` not provided.")

    input_is_sequence = tree.is_nested(xs)
    output_is_sequence = tree.is_nested(init)

    def pack_input(x):
        return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0]

    def pack_output(x):
        return tree.pack_sequence_as(init, x) if output_is_sequence else x[0]

    if xs is None:
        xs_flat = []
        n = int(length)
    else:
        # xs_flat = flatten_input(xs)
        xs_flat = tree.flatten(xs)
        xs_flat = [tf.convert_to_tensor(elem) for elem in xs_flat]
        n = int(length) if length is not None else tf.shape(xs_flat[0])[0]

    # TensorArrays are always flat
    xs_array = [
        tf.TensorArray(
            dtype=x.dtype,
            size=n,
            dynamic_size=False,
            element_shape=x.shape[1:],
            infer_shape=True,
        )
        for x in xs_flat
    ]
    xs_array = [x_a.unstack(x) for x_a, x in zip(xs_array, xs_flat)]

    init_flat = tree.flatten(init)
    carry_flat = [tf.convert_to_tensor(init) for init in init_flat]

    # Store the intermediate values
    # Note: there is a constraint that the output of `f` must have the same
    # shape and dtype as carry (`init`).
    ys_array = [
        tf.TensorArray(
            dtype=carry.dtype,
            size=n,
            dynamic_size=False,
            element_shape=carry.shape,
            infer_shape=True,
        )
        for carry in carry_flat
    ]
    # Size-1 arrays holding the running carry (clear_after_read=False so the
    # carry can be read every iteration).
    carry_array = [
        tf.TensorArray(
            dtype=carry.dtype,
            size=1,
            dynamic_size=False,
            clear_after_read=False,
            element_shape=carry.shape,
            infer_shape=True,
        )
        for carry in carry_flat
    ]
    carry_array = [
        carry.write(0, c) for (carry, c) in zip(carry_array, carry_flat)
    ]

    def loop_body(i, carry_array, ys_array):
        # Read the i-th slice of the inputs (or None if scanning by length).
        packed_xs = (
            pack_input([xs.read(i) for xs in xs_array])
            if len(xs_array) > 0
            else None
        )
        packed_carry = pack_output([carry.read(0) for carry in carry_array])

        carry, ys = f(packed_carry, packed_xs)

        if ys is not None:
            flat_ys = tree.flatten(ys)
            ys_array = [ys.write(i, v) for (ys, v) in zip(ys_array, flat_ys)]
        if carry is not None:
            flat_carry = tree.flatten(carry)
            carry_array = [
                carry.write(0, v) for (carry, v) in zip(carry_array, flat_carry)
            ]
        next_i = i + 1 if not reverse else i - 1
        return (next_i, carry_array, ys_array)

    if isinstance(unroll, bool):
        unroll = max(n, 1) if unroll else 1

    _, carry_array, ys_array = tf.while_loop(
        lambda i, _1, _2: i >= 0 if reverse else i < n,
        loop_body,
        (n - 1 if reverse else 0, carry_array, ys_array),
        parallel_iterations=unroll,
    )

    ys_flat = [ys.stack() for ys in ys_array]
    carry_flat = [carry.read(0) for carry in carry_array]
    if xs is not None:
        # Propagate the static leading-dimension info onto the outputs.
        n_static = xs_flat[0].get_shape().with_rank_at_least(1)[0]
        if not isinstance(n_static, int):
            for x in xs_flat[1:]:
                n_static.assert_is_compatible_with(
                    x.get_shape().with_rank_at_least(1)[0]
                )
        for r in ys_flat:
            r.set_shape(tf.TensorShape(n_static).concatenate(r.get_shape()[1:]))
    return pack_output(carry_flat), pack_output(ys_flat)
def associative_scan(f, elems, reverse=False, axis=0):
    """Parallel prefix scan of an associative binary op `f` over `elems`.

    Uses the recursive odd/even split-and-interleave scheme (as in
    `tfp.math.scan_associative`), with extra validation to match jax's
    `associative_scan` behavior.
    """
    # Implementation is the same as tfp.math.scan_associative
    # with additional checks to ensure similar behavior with jax
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    elems_flat = tree.flatten(elems)
    elems_flat = [tf.convert_to_tensor(elem) for elem in elems_flat]
    if reverse:
        elems_flat = [tf.reverse(elem, [axis]) for elem in elems_flat]

    def _combine(a_flat, b_flat):
        # Repack flat lists into the user structure before calling `f`.
        a = tree.pack_sequence_as(elems, a_flat)
        b = tree.pack_sequence_as(elems, b_flat)
        c = f(a, b)
        c_flat = tree.flatten(c)
        return c_flat

    def _get_dim(x):
        return shape(x)[axis]

    # TODO add constant dim check
    num_elems = _get_dim(elems_flat[0])
    if not all(_get_dim(elem) == num_elems for elem in elems_flat[1:]):
        raise ValueError(
            "Array inputs to associative_scan must have the same "
            "first dimension. (saw: {})".format(
                [tf.shape(elem) for elem in elems_flat]
            )
        )

    def _interleave(a, b, axis):
        # [a b c ...] [d e f ...] -> [a d b e c f ...]
        num_elems_a = _get_dim(a)
        num_elems_b = _get_dim(b)

        # Note that interleaving implies rank(a)==rank(b).
        axis = tf.where(axis >= 0, axis, tf.rank(a) + axis)
        axis = (
            int(axis)  # Avoid ndarray values.
            if tf.get_static_value(axis) is not None
            else axis
        )

        def _interleave_with_b(a):
            return tf.reshape(
                # Work around lack of support for Tensor axes in
                # `tf.stack` by using `concat` and `expand_dims` instead.
                tf.concat(
                    [
                        tf.expand_dims(a, axis=axis + 1),
                        tf.expand_dims(b, axis=axis + 1),
                    ],
                    axis=axis + 1,
                ),
                tf.concat(
                    [
                        a.get_shape()[:axis],
                        [2 * num_elems_b],
                        a.get_shape()[axis + 1 :],
                    ],
                    axis=0,
                ),
            )

        # If `a` has one more element than `b`, interleave all but the last
        # element of `a` and append the leftover element.
        return tf.cond(
            tf.equal(num_elems_a, num_elems_b + 1),
            lambda: tf.concat(
                [
                    _interleave_with_b(
                        slice_along_axis(a, None, -1, axis=axis)
                    ),
                    slice_along_axis(a, -1, None, axis=axis),
                ],
                axis=axis,
            ),
            lambda: _interleave_with_b(a),
        )

    def _scan(elems):
        # Recursive step: combine adjacent pairs, scan the halved problem,
        # then interleave partial results back together.
        elem_length = _get_dim(elems[0])
        a = [slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems]
        b = [
            slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems
        ]
        reduced_elems = _combine(a, b)

        def _handle_base_case_elem_length_two():
            return [
                tf.concat(
                    [slice_along_axis(elem, 0, 1, axis=axis), reduced_elem],
                    axis=axis,
                )
                for (reduced_elem, elem) in zip(reduced_elems, elems)
            ]

        def _handle_base_case_elem_length_three():
            reduced_reduced_elems = _combine(
                reduced_elems,
                [slice_along_axis(elem, 2, 3, axis=axis) for elem in elems],
            )
            return [
                tf.concat(
                    [
                        slice_along_axis(elem, 0, 1, axis=axis),
                        reduced_elem,
                        reduced_reduced_elem,
                    ],
                    axis=axis,
                )
                for (reduced_reduced_elem, reduced_elem, elem) in zip(
                    reduced_reduced_elems, reduced_elems, elems
                )
            ]

        at_base_case = tf.logical_or(
            tf.equal(elem_length, 2), tf.equal(elem_length, 3)
        )

        def _base_case():
            return tf.cond(
                tf.equal(elem_length, 2),
                _handle_base_case_elem_length_two,
                _handle_base_case_elem_length_three,
            )

        def _recursive_case():
            odd_elems = _scan(reduced_elems)

            def _even_length_case():
                return _combine(
                    [
                        slice_along_axis(odd_elem, 0, -1, axis=axis)
                        for odd_elem in odd_elems
                    ],
                    [
                        slice_along_axis(elem, 2, None, 2, axis=axis)
                        for elem in elems
                    ],
                )

            def _odd_length_case():
                return _combine(
                    [odd_elem for odd_elem in odd_elems],
                    [
                        slice_along_axis(elem, 2, None, 2, axis=axis)
                        for elem in elems
                    ],
                )

            results = tf.cond(
                tf.equal(elem_length % 2, 0),
                _even_length_case,
                _odd_length_case,
            )

            even_elems = [
                tf.concat(
                    [slice_along_axis(elem, 0, 1, axis=axis), result], axis=axis
                )
                for (elem, result) in zip(elems, results)
            ]
            return list(
                builtins.map(
                    lambda a, b: _interleave(a, b, axis=axis),
                    even_elems,
                    odd_elems,
                )
            )

        return tf.cond(at_base_case, _base_case, _recursive_case)

    scans = _scan(elems_flat)
    if reverse:
        scans = [tf.reverse(scanned, [axis]) for scanned in scans]
    return tree.pack_sequence_as(elems, scans)
def scatter(indices, values, shape):
    """Scatter `values` at `indices` into a zero tensor of `shape`."""
    return tf.scatter_nd(indices, values, shape)
def scatter_update(inputs, indices, updates):
    """Return a copy of `inputs` with `updates` scattered at `indices`."""
    return tf.tensor_scatter_nd_update(inputs, indices, updates)
def slice(inputs, start_indices, shape):
    # NOTE: shadows the `slice` builtin, but the name is part of the
    # backend's public op API and must stay.
    """Extract a slice of size `shape` from `inputs` at `start_indices`."""
    return tf.slice(inputs, start_indices, shape)
def slice_update(inputs, start_indices, updates):
    """Return `inputs` with the block at `start_indices` replaced by `updates`."""
    return dynamic_update_slice(inputs, updates, start_indices)
def switch(index, branches, *operands):
    """Call `branches[index](*operands)`, clamping `index` into range."""
    index = convert_to_tensor(index, "int32")
    index = tf.clip_by_value(index, 0, len(branches) - 1)

    # Workaround to deal with python closures. More details:
    # https://github.com/tensorflow/tensorflow/issues/8776#issuecomment-311383887
    def _bind(branch):
        return lambda: branch(*operands)

    return tf.switch_case(index, [_bind(branch) for branch in branches])
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Run `body` while `cond` holds, threading `loop_vars` through.

    `loop_vars` may be a single value or a tuple/list; the return value
    mirrors the input's structure.
    """
    single_var = not isinstance(loop_vars, (tuple, list))
    state = (loop_vars,) if single_var else tuple(loop_vars)

    def _wrapped_body(*args):
        # Normalize the body's result back into a tuple for tf.while_loop.
        result = body(*args)
        return (result,) if single_var else tuple(result)

    final_state = tf.while_loop(
        cond,
        _wrapped_body,
        state,
        maximum_iterations=maximum_iterations,
    )
    return final_state[0] if single_var else final_state
def fori_loop(lower, upper, body_fun, init_val):
    """Equivalent of `jax.lax.fori_loop`: fold `body_fun` over [lower, upper)."""

    def _continue(i, _):
        return i < upper

    def _step(i, val):
        return i + 1, body_fun(i, val)

    _, result = tf.while_loop(_continue, _step, (lower, init_val))
    return result
def stop_gradient(variable):
    """Return `variable` with gradient flow blocked."""
    return tf.stop_gradient(variable)
def unstack(x, num=None, axis=0):
    """Split `x` into a list of tensors along `axis`."""
    return tf.unstack(x, num=num, axis=axis)
def random_seed_dtype():
    """Dtype used for RNG seeds on this backend."""
    # tensorflow random operation only works on int32/int64, not uint32.
    return "int64"
def custom_gradient(fun):
    """Decorator attaching a custom gradient to `fun`."""
    return tf.custom_gradient(f=fun)
def remat(f):
    """Implementation of rematerialization.

    Args:
        f: The function or operation to rematerialize.
    Returns:
        A function wrapping f that defines a custom gradient, which
        recomputes f on the backwards pass of a gradient call.
    """
    return tf.recompute_grad(f)
class name_scope(base_name_scope):
    """Backend name scope that also enters a `tf.name_scope`.

    Maintains a global scope stack so that nested scopes with the same
    caller and name can be deduplicated.
    """

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        self._tf_name_scope = tf.name_scope(name)

    def __enter__(self):
        name_scope_stack = global_state.get_global_attribute(
            "name_scope_stack", default=[], set_to_default=True
        )
        if self.deduplicate and name_scope_stack:
            parent_caller = name_scope_stack[-1].caller
            parent_name = name_scope_stack[-1].name
            if (
                self.caller is not None
                and self.caller is parent_caller
                and self.name == parent_name
            ):
                # Same caller and name as the innermost scope: skip entering
                # a duplicate TF name scope.
                return self
        name_scope_stack.append(self)
        self._pop_on_exit = True
        self._tf_name_scope.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        super().__exit__(*args, **kwargs)
        if self._pop_on_exit:
            # Only exit the TF scope if we actually entered it in __enter__.
            self._tf_name_scope.__exit__(*args, **kwargs)
def device_scope(device_name):
    """Context manager placing ops on `device_name`."""
    return tf.device(device_name)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/nn.py | keras/src/backend/tensorflow/nn.py | import math
import warnings
import tensorflow as tf
from keras.src import backend
from keras.src.backend.common.backend_utils import (
compute_adaptive_pooling_window_sizes,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import convert_to_tensor
def relu(x):
    """Rectified linear unit: `max(x, 0)`."""
    return tf.nn.relu(x)
def relu6(x):
    """ReLU clipped at 6: `min(max(x, 0), 6)`."""
    return tf.nn.relu6(x)
def sigmoid(x):
    """Sigmoid activation; stashes the raw logits on the output."""
    logits = x
    output = tf.nn.sigmoid(x)
    # Attach the pre-activation inputs as metadata on the output tensor
    # (read elsewhere in Keras, presumably by crossentropy losses — see
    # other `_keras_logits` uses).
    output._keras_logits = logits
    return output
def sparse_sigmoid(x):
    """Piecewise-linear sigmoid: 0 for x <= -1, 1 for x >= 1, else (x + 1) / 2."""
    x = convert_to_tensor(x)
    zero = tf.constant(0.0, dtype=x.dtype)
    one = tf.constant(1.0, dtype=x.dtype)
    ramp = 0.5 * (x + 1)
    upper_clipped = tf.where(x >= 1, one, ramp)
    return tf.where(x <= -1, zero, upper_clipped)
def tanh(x):
    """Hyperbolic tangent activation."""
    return tf.nn.tanh(x)
def tanh_shrink(x):
    """Tanh-shrink activation: `x - tanh(x)`."""
    return x - tf.math.tanh(x)
def softplus(x):
    """Softplus activation: `log(1 + exp(x))`."""
    return tf.math.softplus(x)
def softsign(x):
    """Softsign activation: `x / (1 + |x|)`."""
    return tf.nn.softsign(x)
def soft_shrink(x, threshold=0.5):
    """Soft-shrinkage: shift values toward zero by `threshold`, zero inside."""
    negative_branch = tf.where(x < -threshold, x + threshold, tf.zeros_like(x))
    return tf.where(x > threshold, x - threshold, negative_branch)
def sparse_plus(x):
    """Sparse-plus: 0 for x <= -1, (x + 1)^2 / 4 for -1 < x < 1, x for x >= 1."""
    return tf.where(
        x <= -1,
        tf.zeros_like(x),
        tf.where(x < 1, (1 / 4) * tf.pow(x + 1, 2), x),
    )
def silu(x):
    """SiLU (swish) activation: `x * sigmoid(x)`."""
    return tf.nn.silu(x)
def squareplus(x, b=4):
    """Squareplus activation: `(x + sqrt(x**2 + b)) / 2`."""
    x = convert_to_tensor(x)
    b = convert_to_tensor(b, dtype=x.dtype)
    return (x + tf.sqrt(tf.square(x) + b)) / 2
def log_sigmoid(x):
    """Log-sigmoid activation: `log(sigmoid(x))`."""
    return tf.math.log_sigmoid(x)
def leaky_relu(x, negative_slope=0.2):
    """Leaky ReLU with slope `negative_slope` on the negative side."""
    return tf.nn.leaky_relu(x, alpha=negative_slope)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: `relu6(x + 3) / 6`."""
    x = convert_to_tensor(x)
    three = tf.constant(3.0, x.dtype)
    six = tf.constant(6.0, x.dtype)
    return relu6(x + three) / six
def hard_silu(x):
    """Hard SiLU: `x * hard_sigmoid(x)`."""
    return x * hard_sigmoid(x)
def elu(x, alpha=1.0):
    """ELU activation; the negative branch is scaled by `alpha`."""
    out = tf.nn.elu(x)
    if alpha == 1:
        return out
    # tf.nn.elu has no alpha parameter, so rescale the negative side.
    return tf.where(x > 0, out, alpha * out)
def selu(x):
    """Scaled ELU activation."""
    return tf.nn.selu(x)
def gelu(x, approximate=True):
    """GELU activation; `approximate=True` uses the tanh approximation."""
    x = convert_to_tensor(x)
    return tf.nn.gelu(x, approximate=approximate)
def celu(x, alpha=1.0):
    """CELU activation: `max(x, 0) + alpha * expm1(min(x, 0) / alpha)`."""
    positive_part = tf.maximum(x, 0.0)
    negative_part = alpha * tf.math.expm1(tf.minimum(x, 0.0) / alpha)
    return positive_part + negative_part
def glu(x, axis=-1):
    """Gated linear unit: split `x` in half along `axis`, gate by sigmoid.

    Raises:
        ValueError: if the size of `axis` is odd.
    """
    if x.shape[axis] % 2 != 0:
        raise ValueError(
            "axis size must be divisible by 2. "
            f"Received: x.shape={x.shape} with axis={axis}"
        )
    value, gate = tf.split(x, num_or_size_splits=2, axis=axis)
    return value * tf.sigmoid(gate)
def hard_tanh(x):
    """Hard tanh: clip `x` to [-1, 1]."""
    return tf.clip_by_value(x, clip_value_min=-1.0, clip_value_max=1.0)
def hard_shrink(x, threshold=0.5):
    """Zero out entries whose magnitude is at most `threshold`."""
    keep_mask = tf.abs(x) > threshold
    return tf.where(keep_mask, x, tf.zeros_like(x))
def threshold(x, threshold, default_value):
    """Return `x` where `x > threshold`, else `default_value`."""
    return tf.where(x > threshold, x, default_value)
def softmax(x, axis=-1):
    """Softmax over `axis` (`axis=None` normalizes over all elements)."""
    logits = x
    if axis is None:
        # Unlike numpy, tf will handle axis=None as axis=-1.
        # We need this workaround for the reduction on every dim.
        output = tf.reshape(x, [-1])
        output = tf.nn.softmax(output, axis=-1)
        output = tf.reshape(output, tf.shape(x))
    else:
        output = tf.nn.softmax(x, axis=axis)
    # Attach the pre-activation inputs as metadata on the output tensor
    # (read elsewhere in Keras — see other `_keras_logits` uses).
    output._keras_logits = logits
    return output
def log_softmax(x, axis=-1):
    """Log-softmax over `axis` (`axis=None` normalizes over all elements)."""
    if axis is not None:
        return tf.nn.log_softmax(x, axis=axis)
    # Unlike numpy, tf will handle axis=None as axis=-1.
    # We need this workaround for the reduction on every dim.
    flattened = tf.reshape(x, [-1])
    flattened = tf.nn.log_softmax(flattened, axis=-1)
    return tf.reshape(flattened, tf.shape(x))
def sparsemax(x, axis=-1):
    """Sparsemax activation: a sparse projection of logits onto the simplex."""
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(x)
    logits_sorted = tf.sort(logits, direction="DESCENDING", axis=axis)
    logits_cumsum = tf.cumsum(logits_sorted, axis=axis)
    r = tf.range(1, tf.shape(logits)[axis] + 1, dtype=logits.dtype)
    r_shape = [1] * len(logits.shape)
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = tf.reshape(r, r_shape)  # Reshape for broadcasting
    # Support set: positions contributing to the projection.
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold
    logits_cumsum_safe = tf.where(support, logits_cumsum, 0.0)
    k = tf.reduce_sum(tf.cast(support, logits.dtype), axis=axis, keepdims=True)
    tau = (tf.reduce_sum(logits_cumsum_safe, axis=axis, keepdims=True) - 1) / k
    output = tf.maximum(logits - tau, 0.0)
    return output
def _transpose_spatial_inputs(inputs):
    """Transpose `channels_first` pooling inputs to `channels_last`.

    Raises:
        ValueError: if the input rank is not 3, 4, or 5.
    """
    # Tensorflow pooling does not support `channels_first` format, so
    # we need to transpose to `channels_last` format.
    perm_by_spatial_dims = {
        1: (0, 2, 1),
        2: (0, 2, 3, 1),
        3: (0, 2, 3, 4, 1),
    }
    num_spatial_dims = len(inputs.shape) - 2
    if num_spatial_dims not in perm_by_spatial_dims:
        raise ValueError(
            "Pooling inputs's shape must be 3, 4 or 5, corresponding to 1D, 2D "
            f"and 3D inputs. But received shape: {inputs.shape}."
        )
    return tf.transpose(inputs, perm_by_spatial_dims[num_spatial_dims])
def _transpose_spatial_outputs(outputs):
    """Undo the transpose performed by `_transpose_spatial_inputs`."""
    perm_by_spatial_dims = {
        1: (0, 2, 1),
        2: (0, 3, 1, 2),
        3: (0, 4, 1, 2, 3),
    }
    num_spatial_dims = len(outputs.shape) - 2
    if num_spatial_dims in perm_by_spatial_dims:
        outputs = tf.transpose(outputs, perm_by_spatial_dims[num_spatial_dims])
    return outputs
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling over the spatial dims of `inputs` (1D/2D/3D).

    `strides` defaults to `pool_size`. `channels_first` inputs are
    transposed to `channels_last` around the TF op, which only supports
    the latter.
    """
    data_format = backend.standardize_data_format(data_format)
    strides = pool_size if strides is None else strides
    padding = padding.upper()
    tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
    if data_format == "channels_first":
        # Tensorflow pooling does not support `channels_first` format, so
        # we need to transpose to `channels_last` format.
        inputs = _transpose_spatial_inputs(inputs)

    outputs = tf.nn.max_pool(
        inputs,
        pool_size,
        strides,
        padding,
        tf_data_format,
    )
    if data_format == "channels_first":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Average pooling over the spatial dims of `inputs` (1D/2D/3D).

    `strides` defaults to `pool_size`. `channels_first` inputs are
    transposed to `channels_last` around the TF op, which only supports
    the latter.
    """
    data_format = backend.standardize_data_format(data_format)
    strides = pool_size if strides is None else strides
    padding = padding.upper()
    tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
    if data_format == "channels_first":
        # Tensorflow pooling does not support `channels_first` format, so
        # we need to transpose to `channels_last` format.
        inputs = _transpose_spatial_inputs(inputs)

    outputs = tf.nn.avg_pool(
        inputs,
        pool_size,
        strides,
        padding,
        tf_data_format,
    )
    if data_format == "channels_first":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def _compute_static_gather_indices(
    input_dim, output_size, small_window, big_window
):
    """Compute gather indices for Two-Pool Gather method (corrected).

    For adaptive pooling, each output position i covers the input window
    [floor(i*L/O), ceil((i+1)*L/O)); windows have one of two sizes
    (`small_window` or `big_window`). The returned indices select, per
    output position, the correct element from the concatenation of the
    small-window pool result followed by the big-window pool result.
    """
    # Left edge of each output position's input window.
    window_starts = tf.cast(
        tf.floor(
            tf.cast(tf.range(output_size), tf.float32)
            * tf.cast(input_dim, tf.float32)
            / tf.cast(output_size, tf.float32)
        ),
        tf.int32,
    )
    # Right edge (exclusive) of each window.
    window_ends = tf.cast(
        tf.math.ceil(
            tf.cast(tf.range(1, output_size + 1), tf.float32)
            * tf.cast(input_dim, tf.float32)
            / tf.cast(output_size, tf.float32)
        ),
        tf.int32,
    )
    window_ends = tf.minimum(window_ends, input_dim)
    window_starts = tf.minimum(window_starts, input_dim - 1)
    window_sizes = window_ends - window_starts
    is_big_window = tf.equal(window_sizes, big_window)
    # VALID pooling with window `small_window` produces this many outputs;
    # big-window results are appended after them in the concatenation.
    small_pool_len = max(1, input_dim - small_window + 1)
    small_indices = window_starts
    big_indices = window_starts + small_pool_len
    gather_indices = tf.where(is_big_window, big_indices, small_indices)
    return tf.cast(gather_indices, tf.int32)
def _adaptive_average_pool1d(inputs, output_size, data_format="channels_first"):
    """Adaptive average pooling 1D via the Two-Pool Gather method.

    Pools once with each of the two possible window sizes, then gathers
    the correct result per output position. Requires a static length.
    """
    if isinstance(output_size, int):
        output_size = (output_size,)
    if data_format == "channels_first":
        # TF pooling works in channels_last (NWC); transpose around the op.
        inputs = tf.transpose(inputs, (0, 2, 1))
    static_shape = inputs.shape.as_list()
    l_static = static_shape[1]
    out_l = output_size[0]
    if l_static is None:
        raise ValueError(
            "Input length must be statically known for adaptive pooling"
        )
    small_l, big_l = compute_adaptive_pooling_window_sizes(l_static, out_l)
    gather_l = _compute_static_gather_indices(l_static, out_l, small_l, big_l)
    small_pool_l = tf.nn.pool(
        inputs,
        window_shape=(small_l,),
        pooling_type="AVG",
        strides=(1,),
        padding="VALID",
        data_format="NWC",
    )
    big_pool_l = tf.nn.pool(
        inputs,
        window_shape=(big_l,),
        pooling_type="AVG",
        strides=(1,),
        padding="VALID",
        data_format="NWC",
    )
    # Small-window results first, big-window results after — the gather
    # indices are computed against this ordering.
    combined_l = tf.concat([small_pool_l, big_pool_l], axis=1)
    pooled_l = tf.gather(combined_l, gather_l, axis=1)
    if data_format == "channels_first":
        pooled_l = tf.transpose(pooled_l, (0, 2, 1))
    return pooled_l
def _adaptive_max_pool1d(inputs, output_size, data_format="channels_first"):
    """Adaptive max pooling 1D via the Two-Pool Gather method.

    Same scheme as `_adaptive_average_pool1d`, with MAX pooling.
    """
    if isinstance(output_size, int):
        output_size = (output_size,)
    if data_format == "channels_first":
        inputs = tf.transpose(inputs, (0, 2, 1))
    static_shape = inputs.shape.as_list()
    l_static = static_shape[1]
    out_l = output_size[0]
    if l_static is None:
        raise ValueError(
            "Input length must be statically known for adaptive pooling"
        )
    small_l, big_l = compute_adaptive_pooling_window_sizes(l_static, out_l)
    gather_l = _compute_static_gather_indices(l_static, out_l, small_l, big_l)
    small_pool_l = tf.nn.pool(
        inputs,
        window_shape=(small_l,),
        pooling_type="MAX",
        strides=(1,),
        padding="VALID",
        data_format="NWC",
    )
    big_pool_l = tf.nn.pool(
        inputs,
        window_shape=(big_l,),
        pooling_type="MAX",
        strides=(1,),
        padding="VALID",
        data_format="NWC",
    )
    combined_l = tf.concat([small_pool_l, big_pool_l], axis=1)
    pooled_l = tf.gather(combined_l, gather_l, axis=1)
    if data_format == "channels_first":
        pooled_l = tf.transpose(pooled_l, (0, 2, 1))
    return pooled_l
def _adaptive_average_pool2d(inputs, output_size, data_format="channels_first"):
    """Adaptive average pooling 2D via the Two-Pool Gather method.

    Pooling is separable: first along height, then along width, each with
    the two-pool-and-gather scheme. Requires static spatial dims.
    """
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    if data_format == "channels_first":
        inputs = tf.transpose(inputs, (0, 2, 3, 1))
    static_shape = inputs.shape.as_list()
    h_static = static_shape[1]
    w_static = static_shape[2]
    out_h, out_w = output_size
    if h_static is None or w_static is None:
        raise ValueError(
            "Input spatial dimensions must be "
            "statically known for adaptive pooling"
        )
    small_h, big_h = compute_adaptive_pooling_window_sizes(h_static, out_h)
    small_w, big_w = compute_adaptive_pooling_window_sizes(w_static, out_w)
    gather_h = _compute_static_gather_indices(h_static, out_h, small_h, big_h)
    gather_w = _compute_static_gather_indices(w_static, out_w, small_w, big_w)
    # Height pass.
    small_pool_h = tf.nn.pool(
        inputs,
        window_shape=(small_h, 1),
        pooling_type="AVG",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    big_pool_h = tf.nn.pool(
        inputs,
        window_shape=(big_h, 1),
        pooling_type="AVG",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    combined_h = tf.concat([small_pool_h, big_pool_h], axis=1)
    pooled_h = tf.gather(combined_h, gather_h, axis=1)
    # Width pass on the height-pooled result.
    small_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, small_w),
        pooling_type="AVG",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    big_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, big_w),
        pooling_type="AVG",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    combined_w = tf.concat([small_pool_w, big_pool_w], axis=2)
    pooled_w = tf.gather(combined_w, gather_w, axis=2)
    if data_format == "channels_first":
        pooled_w = tf.transpose(pooled_w, (0, 3, 1, 2))
    return pooled_w
def _adaptive_max_pool2d(inputs, output_size, data_format="channels_first"):
    """Adaptive Max Pooling 2D using Two-Pool Gather method."""
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    if data_format == "channels_first":
        inputs = tf.transpose(inputs, (0, 2, 3, 1))
    static_shape = inputs.shape.as_list()
    h_static = static_shape[1]
    w_static = static_shape[2]
    out_h, out_w = output_size
    if h_static is None or w_static is None:
        raise ValueError(
            "Input spatial dimensions must be "
            "statically known for adaptive pooling"
        )
    small_h, big_h = compute_adaptive_pooling_window_sizes(h_static, out_h)
    small_w, big_w = compute_adaptive_pooling_window_sizes(w_static, out_w)
    gather_h = _compute_static_gather_indices(h_static, out_h, small_h, big_h)
    gather_w = _compute_static_gather_indices(w_static, out_w, small_w, big_w)
    # Height pass.
    small_pool_h = tf.nn.pool(
        inputs,
        window_shape=(small_h, 1),
        pooling_type="MAX",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    big_pool_h = tf.nn.pool(
        inputs,
        window_shape=(big_h, 1),
        pooling_type="MAX",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    combined_h = tf.concat([small_pool_h, big_pool_h], axis=1)
    pooled_h = tf.gather(combined_h, gather_h, axis=1)
    # Width pass on the height-pooled result.
    small_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, small_w),
        pooling_type="MAX",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    big_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, big_w),
        pooling_type="MAX",
        strides=(1, 1),
        padding="VALID",
        data_format="NHWC",
    )
    combined_w = tf.concat([small_pool_w, big_pool_w], axis=2)
    pooled_w = tf.gather(combined_w, gather_w, axis=2)
    if data_format == "channels_first":
        pooled_w = tf.transpose(pooled_w, (0, 3, 1, 2))
    return pooled_w
def _adaptive_average_pool3d(inputs, output_size, data_format="channels_first"):
    """Adaptive average pooling 3D via the Two-Pool Gather method.

    Separable: depth, then height, then width, each with the
    two-pool-and-gather scheme. Requires static spatial dims.
    """
    if isinstance(output_size, int):
        output_size = (output_size, output_size, output_size)
    if data_format == "channels_first":
        inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
    static_shape = inputs.shape.as_list()
    d_static = static_shape[1]
    h_static = static_shape[2]
    w_static = static_shape[3]
    out_d, out_h, out_w = output_size
    if d_static is None or h_static is None or w_static is None:
        raise ValueError(
            "Input spatial dimensions must be "
            "statically known for adaptive pooling"
        )
    small_d, big_d = compute_adaptive_pooling_window_sizes(d_static, out_d)
    small_h, big_h = compute_adaptive_pooling_window_sizes(h_static, out_h)
    small_w, big_w = compute_adaptive_pooling_window_sizes(w_static, out_w)
    gather_d = _compute_static_gather_indices(d_static, out_d, small_d, big_d)
    gather_h = _compute_static_gather_indices(h_static, out_h, small_h, big_h)
    gather_w = _compute_static_gather_indices(w_static, out_w, small_w, big_w)
    # Depth pass.
    small_pool_d = tf.nn.pool(
        inputs,
        window_shape=(small_d, 1, 1),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    big_pool_d = tf.nn.pool(
        inputs,
        window_shape=(big_d, 1, 1),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    combined_d = tf.concat([small_pool_d, big_pool_d], axis=1)
    pooled_d = tf.gather(combined_d, gather_d, axis=1)
    # Height pass.
    small_pool_h = tf.nn.pool(
        pooled_d,
        window_shape=(1, small_h, 1),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    big_pool_h = tf.nn.pool(
        pooled_d,
        window_shape=(1, big_h, 1),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    combined_h = tf.concat([small_pool_h, big_pool_h], axis=2)
    pooled_h = tf.gather(combined_h, gather_h, axis=2)
    # Width pass.
    small_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, 1, small_w),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    big_pool_w = tf.nn.pool(
        pooled_h,
        window_shape=(1, 1, big_w),
        pooling_type="AVG",
        strides=(1, 1, 1),
        padding="VALID",
        data_format="NDHWC",
    )
    combined_w = tf.concat([small_pool_w, big_pool_w], axis=3)
    pooled_w = tf.gather(combined_w, gather_w, axis=3)
    if data_format == "channels_first":
        pooled_w = tf.transpose(pooled_w, (0, 4, 1, 2, 3))
    return pooled_w
def _adaptive_max_pool3d(inputs, output_size, data_format="channels_first"):
    """Adaptive Max Pooling 3D using Two-Pool Gather method."""
    if isinstance(output_size, int):
        output_size = (output_size, output_size, output_size)
    # Work internally in NDHWC; transpose back at the end if needed.
    if data_format == "channels_first":
        inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
    spatial_sizes = inputs.shape.as_list()[1:4]
    out_d, out_h, out_w = output_size
    target_sizes = (out_d, out_h, out_w)
    if any(size is None for size in spatial_sizes):
        raise ValueError(
            "Input spatial dimensions must be "
            "statically known for adaptive pooling"
        )
    # For every spatial axis, precompute the two candidate window sizes and
    # the gather indices that stitch the two pooled results together.
    window_pairs = [
        compute_adaptive_pooling_window_sizes(in_size, out_size)
        for in_size, out_size in zip(spatial_sizes, target_sizes)
    ]
    gather_indices = [
        _compute_static_gather_indices(in_size, out_size, small, big)
        for (in_size, out_size), (small, big) in zip(
            zip(spatial_sizes, target_sizes), window_pairs
        )
    ]
    outputs = inputs
    for offset, ((small, big), indices) in enumerate(
        zip(window_pairs, gather_indices)
    ):
        axis = offset + 1  # NDHWC spatial axes are 1 (D), 2 (H), 3 (W).
        small_window = [1, 1, 1]
        small_window[offset] = small
        big_window = [1, 1, 1]
        big_window[offset] = big
        pooled_small = tf.nn.pool(
            outputs,
            window_shape=tuple(small_window),
            pooling_type="MAX",
            strides=(1, 1, 1),
            padding="VALID",
            data_format="NDHWC",
        )
        pooled_big = tf.nn.pool(
            outputs,
            window_shape=tuple(big_window),
            pooling_type="MAX",
            strides=(1, 1, 1),
            padding="VALID",
            data_format="NDHWC",
        )
        combined = tf.concat([pooled_small, pooled_big], axis=axis)
        outputs = tf.gather(combined, indices, axis=axis)
    if data_format == "channels_first":
        outputs = tf.transpose(outputs, (0, 4, 1, 2, 3))
    return outputs
def adaptive_average_pool(inputs, output_size, data_format=None):
    """Dispatch adaptive average pooling to the 1D/2D/3D implementation."""
    data_format = backend.standardize_data_format(data_format)
    spatial_rank = len(inputs.shape) - 2
    dispatch = {
        1: _adaptive_average_pool1d,
        2: _adaptive_average_pool2d,
        3: _adaptive_average_pool3d,
    }
    if spatial_rank not in dispatch:
        raise ValueError(
            "adaptive_average_pool supports 1D, 2D, or 3D inputs only."
        )
    return dispatch[spatial_rank](inputs, output_size, data_format)
def adaptive_max_pool(inputs, output_size, data_format=None):
    """Dispatch adaptive max pooling to the 1D/2D/3D implementation."""
    data_format = backend.standardize_data_format(data_format)
    spatial_rank = len(inputs.shape) - 2
    dispatch = {
        1: _adaptive_max_pool1d,
        2: _adaptive_max_pool2d,
        3: _adaptive_max_pool3d,
    }
    if spatial_rank not in dispatch:
        raise ValueError(
            "adaptive_max_pool supports 1D, 2D, or 3D inputs only."
        )
    return dispatch[spatial_rank](inputs, output_size, data_format)
def _convert_data_format(data_format, ndim):
if data_format == "channels_last":
if ndim == 3:
return "NWC"
elif ndim == 4:
return "NHWC"
elif ndim == 5:
return "NDHWC"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
elif data_format == "channels_first":
if ndim == 3:
return "NCW"
elif ndim == 4:
return "NCHW"
elif ndim == 5:
return "NCDHW"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
else:
raise ValueError(
f"Invalid data_format: {data_format}. "
'Expected values are ["channels_first", "channels_last"]'
)
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution (1D/2D/3D) backed by `tf.nn.convolution`.

    Args:
        inputs: Tensor of rank 3, 4, or 5 (batch + spatial dims + channels).
        kernel: Convolution kernel; its last two dims are
            (input_channels_per_group, output_channels).
        strides: Int or tuple of ints, one per spatial dimension.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None` to use
            the backend default.
        dilation_rate: Int or tuple of ints, one per spatial dimension.

    Returns:
        The convolution output tensor.

    Raises:
        ValueError: If the convolution would produce an empty output.
    """
    # Standardize up front so the XLA-workaround checks below see the
    # resolved value even when `data_format` is passed as None. (Previously
    # the 5D channels_first check compared against the raw argument, so the
    # workaround could be skipped when None resolved to "channels_first".)
    data_format = backend.standardize_data_format(data_format)

    def _conv():
        tf_data_format = _convert_data_format(data_format, len(inputs.shape))
        result = tf.nn.convolution(
            inputs,
            kernel,
            strides,
            padding.upper(),
            data_format=tf_data_format,
            dilations=dilation_rate,
        )
        result_shape = result.shape
        # A fully-defined output shape containing a zero dimension means the
        # kernel/strides/dilation consumed more than the input provides.
        if (
            result_shape.is_fully_defined()
            and math.prod(result_shape.as_list()) == 0
        ):
            raise ValueError(
                "The convolution operation resulted in an empty output. "
                "Output shape:"
                f" {result_shape}. This can happen if the input is too small "
                "for the given kernel size, strides, dilation rate, and "
                "padding mode. Please check the input shape and convolution "
                "parameters."
            )
        return result

    # Certain ops are broken in Tensorflow on CPU only.
    # We can work around by compiling the op with XLA.
    @tf.function(jit_compile=True)
    def _conv_xla():
        return _conv()

    # Channels first "NCDHW" (3d convolutions) are broken on CPU without XLA.
    needs_xla = data_format == "channels_first" and len(inputs.shape) == 5
    # grouped convolutions are broken on CPU without XLA.
    if data_format == "channels_last":
        channels = inputs.shape[-1]
    else:
        channels = inputs.shape[1]
    # `channels != kernel.shape[-2]` implies groups > 1 (grouped convolution).
    needs_xla = needs_xla or channels != kernel.shape[-2]
    if needs_xla:
        return _conv_xla()
    else:
        return _conv()
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise convolution for 1D or 2D inputs via `tf.nn.depthwise_conv2d`.

    1D inputs are lifted to 2D by inserting a dummy spatial dimension,
    convolved, then squeezed back.

    Args:
        inputs: Tensor of rank 3 (1D conv) or 4 (2D conv).
        kernel: Depthwise kernel tensor.
        strides: Int or tuple of ints, one per spatial dimension.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None`.
        dilation_rate: Int or tuple of ints, one per spatial dimension.

    Returns:
        The depthwise-convolved tensor.

    Raises:
        ValueError: If `inputs` has more than 2 spatial dimensions.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = len(inputs.shape) - 2
    if num_spatial_dims > 2:
        raise ValueError(
            "`inputs` rank must be 3 (1D conv) or 4 (2D conv). Received: "
            f"{inputs.ndim}."
        )
    # Because we use `tf.nn.depthwise_conv2d` for both 1D and 2D convs, we set
    # `tf_data_format` using 2D conv format.
    tf_data_format = _convert_data_format(data_format, 4)
    padding = padding.upper()
    if isinstance(strides, int):
        strides = (strides,) * num_spatial_dims
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * num_spatial_dims
    if num_spatial_dims == 1:
        # 1D depthwise conv: duplicate the single stride for the dummy
        # spatial dimension and pad with batch/channel strides of 1.
        if data_format == "channels_last":
            strides = (1,) + strides * 2 + (1,)
            spatial_start_dim = 1
        else:
            strides = (1, 1) + strides * 2
            spatial_start_dim = 2
        inputs = tf.expand_dims(inputs, spatial_start_dim)
        kernel = tf.expand_dims(kernel, axis=0)
        dilation_rate = None if dilation_rate is None else (1,) + dilation_rate
        outputs = tf.nn.depthwise_conv2d(
            inputs,
            kernel,
            strides,
            padding,
            data_format=tf_data_format,
            dilations=dilation_rate,
        )
        # Remove the dummy spatial dimension added above.
        return tf.squeeze(outputs, [spatial_start_dim])
    # 2D depthwise conv: just pad strides with batch/channel entries.
    if data_format == "channels_last":
        strides = (1,) + strides + (1,)
        spatial_start_dim = 1
    else:
        strides = (1, 1) + strides
        spatial_start_dim = 2
    return tf.nn.depthwise_conv2d(
        inputs,
        kernel,
        strides,
        padding,
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable convolution (depthwise then pointwise) for 1D or 2D inputs.

    Mirrors `depthwise_conv`: 1D inputs are lifted to 2D with a dummy
    spatial dimension, passed to `tf.nn.separable_conv2d`, then squeezed.

    Args:
        inputs: Tensor of rank 3 (1D conv) or 4 (2D conv).
        depthwise_kernel: Kernel for the depthwise stage.
        pointwise_kernel: 1x1 kernel for the pointwise stage.
        strides: Int or tuple of ints, one per spatial dimension.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None`.
        dilation_rate: Int or tuple of ints, one per spatial dimension.

    Returns:
        The separably-convolved tensor.

    Raises:
        ValueError: If `inputs` has more than 2 spatial dimensions.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = len(inputs.shape) - 2
    if num_spatial_dims > 2:
        raise ValueError(
            "`num_spatial_dims` must be 1 or 2. Received: "
            f"num_spatial_dims={num_spatial_dims}."
        )
    # Because we use `tf.nn.separable_conv2d` for both 1D and 2D convs, we set
    # `tf_data_format` using 2D conv format.
    tf_data_format = _convert_data_format(data_format, 4)
    padding = padding.upper()
    if isinstance(strides, int):
        strides = (strides,) * num_spatial_dims
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * num_spatial_dims
    if num_spatial_dims == 1:
        # 1D depthwise conv: duplicate the single stride for the dummy
        # spatial dimension and pad with batch/channel strides of 1.
        if data_format == "channels_last":
            strides = (1,) + strides * 2 + (1,)
            spatial_start_dim = 1
        else:
            strides = (1, 1) + strides * 2
            spatial_start_dim = 2
        inputs = tf.expand_dims(inputs, spatial_start_dim)
        depthwise_kernel = tf.expand_dims(depthwise_kernel, axis=0)
        pointwise_kernel = tf.expand_dims(pointwise_kernel, axis=0)
        dilation_rate = None if dilation_rate is None else (1,) + dilation_rate
        outputs = tf.nn.separable_conv2d(
            inputs,
            depthwise_kernel,
            pointwise_kernel,
            strides,
            padding,
            data_format=tf_data_format,
            dilations=dilation_rate,
        )
        # Remove the dummy spatial dimension added above.
        return tf.squeeze(outputs, [spatial_start_dim])
    # 2D case: just pad strides with batch/channel entries.
    if data_format == "channels_last":
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides
    return tf.nn.separable_conv2d(
        inputs,
        depthwise_kernel,
        pointwise_kernel,
        strides,
        padding,
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """Transposed (gradient/deconvolution) convolution via `tf.nn.conv_transpose`.

    Args:
        inputs: Tensor of rank 3, 4, or 5.
        kernel: Kernel tensor; spatial dims first, `filters` is taken from
            `kernel.shape[-2]`.
        strides: Int or tuple of ints, one per spatial dimension.
        padding: `"valid"` or `"same"` (case-insensitive).
        output_padding: Optional explicit output padding passed to the
            output-shape computation.
        data_format: `"channels_first"`, `"channels_last"`, or `None`.
        dilation_rate: Int or tuple of ints, one per spatial dimension.

    Returns:
        The transposed-convolution output tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    tf_data_format = _convert_data_format(data_format, len(inputs.shape))
    kernel_size = kernel.shape[:-2]
    filters = kernel.shape[-2]
    input_shape = list(inputs.shape)
    symbolic_shape = tf.shape(inputs)
    # Fill unknown (None) static dims with their dynamic counterparts so the
    # output shape can always be computed.
    for i, e in enumerate(input_shape):
        if e is None:
            input_shape[i] = symbolic_shape[i]
    output_shape = compute_conv_transpose_output_shape(
        input_shape,
        kernel_size,
        filters,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    )
    return tf.nn.conv_transpose(
        inputs,
        kernel,
        output_shape,
        strides,
        padding=padding.upper(),
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """One-hot encode integer tensor `x` along `axis`.

    Negative entries in `x` produce all-zero vectors. When `sparse=True`,
    a `tf.SparseTensor` is returned; this path requires statically known
    input shapes.

    Args:
        x: Integer tensor of class indices.
        num_classes: Size of the one-hot dimension.
        axis: Axis at which to insert the one-hot dimension.
        dtype: Output dtype; defaults to `"float32"`.
        sparse: If True, return a `tf.SparseTensor`.

    Returns:
        A dense tensor or `tf.SparseTensor` with one extra dimension of
        size `num_classes`.
    """
    x = convert_to_tensor(x, dtype="int64")
    if dtype is None:
        dtype = "float32"
    else:
        dtype = backend.standardize_dtype(dtype)
    if sparse:
        # We don't use `tf.sparse.bincount`, it doesn't handle negative indices
        # and only support rank 1 and 2 tensors (`one_hot` adds a dimension).
        if axis < 0:
            axis = axis + len(x.shape) + 1
        values_count = math.prod(x.shape)
        values = tf.reshape(x, (values_count,))
        # We deal with negative inputs by having zeros in the output although
        # it's useless. It makes shapes static.
        values = tf.cast(tf.greater_equal(values, 0), dtype=dtype)
        # Build the full coordinate grid for every element of `x`, then
        # splice in the class index at `axis` to form SparseTensor indices.
        indices = [tf.range(dim) for dim in x.shape]
        indices = tf.meshgrid(*indices, indexing="ij")
        indices.insert(axis, tf.maximum(x, 0))  # Deal with negative indices
        indices = [tf.reshape(a, (values_count, 1)) for a in indices]
        indices = [tf.cast(a, tf.int64) for a in indices]
        indices = tf.concat(indices, axis=1)
        shape = list(x.shape)
        shape.insert(axis, num_classes)
        return tf.SparseTensor(indices, values, shape)
    # For bool outputs pass explicit on/off values; otherwise let
    # `tf.one_hot` use its defaults in `dtype`.
    on_value, off_value = (True, False) if dtype == "bool" else (None, None)
    return tf.one_hot(
        x,
        num_classes,
        on_value=on_value,
        off_value=off_value,
        axis=axis,
        dtype=dtype,
    )
def multi_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """Multi-hot encode `x`: one-hot per element, then reduce over elements.

    Args:
        x: Integer tensor of class indices (rank 1 or higher; for rank > 1
           the reduction runs over axis 1, otherwise axis 0).
        num_classes: Size of the encoded dimension.
        axis: Axis passed through to `one_hot`.
        dtype: Output dtype.
        sparse: If True, return a `tf.SparseTensor`.

    Returns:
        A dense tensor or `tf.SparseTensor` of multi-hot encodings.
    """
    reduction_axis = 1 if len(x.shape) > 1 else 0
    if backend.standardize_dtype(dtype) == "bool":
        if sparse:
            # `tf.sparse.reduce_max` doesn't work on bool and there is no
            # `tf.sparse.reduce_any`.
            outputs = one_hot(
                x, num_classes, axis=axis, dtype="int8", sparse=True
            )
            outputs = tf.sparse.reduce_max(
                outputs, axis=reduction_axis, output_is_sparse=True
            )
            # `tf.cast` loses the static shape on sparse tensors; restore it.
            outputs_shape = outputs.shape
            outputs = tf.cast(outputs, dtype)
            outputs.set_shape(outputs_shape)
            return outputs
        else:
            outputs = one_hot(x, num_classes, axis=axis, dtype=dtype)
            return tf.reduce_any(outputs, axis=reduction_axis)
    else:
        if sparse:
            # We don't use `tf.sparse.bincount`, it doesn't handle negative
            # indices and has a rank limitation.
            outputs = one_hot(
                x, num_classes, axis=axis, dtype=dtype, sparse=True
            )
            return tf.sparse.reduce_max(
                outputs, axis=reduction_axis, output_is_sparse=True
            )
        else:
            outputs = one_hot(x, num_classes, axis=axis, dtype=dtype)
            return tf.reduce_max(outputs, axis=reduction_axis)
def _get_logits(output, from_logits, op_type, fn_name):
"""Retrieves logits tensor from maybe-softmax or maybe-sigmoid tensor."""
output_ = output
from_logits_ = from_logits
has_keras_logits = hasattr(output, "_keras_logits")
if has_keras_logits:
output_ = output._keras_logits
from_logits_ = True
from_expected_op_type = (
hasattr(output, "op")
and not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))
and output.op.type == op_type
) and not has_keras_logits
if from_expected_op_type:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/linalg.py | keras/src/backend/tensorflow/linalg.py | import tensorflow as tf
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import convert_to_tensor
def cholesky(a, upper=False):
    """Cholesky decomposition; returns the upper factor when `upper=True`."""
    factor = tf.linalg.cholesky(a)
    # tf.linalg.cholesky simply returns NaNs for non-positive definite
    # matrices; surface those as an explicit error instead.
    factor = tf.debugging.check_numerics(factor, "Cholesky")
    return tf.linalg.adjoint(factor) if upper else factor
def cholesky_inverse(a, upper=False):
    """Compute a matrix inverse from its Cholesky factor `a`."""
    eye = tf.eye(num_rows=tf.shape(a)[-1], dtype=a.dtype)
    # Invert the triangular factor, then form factor_inv^T @ factor_inv
    # (or the transposed product for an upper factor).
    factor_inv = tf.linalg.triangular_solve(a, eye, lower=not upper)
    if upper:
        return tf.matmul(factor_inv, factor_inv, transpose_b=True)
    return tf.matmul(factor_inv, factor_inv, transpose_a=True)
def det(a):
    """Compute the determinant of `a` (delegates to `tf.linalg.det`)."""
    return tf.linalg.det(a)
def eig(a):
    """Eigendecomposition of a general matrix (delegates to `tf.linalg.eig`)."""
    return tf.linalg.eig(a)
def eigh(a):
    """Eigendecomposition of a Hermitian matrix (delegates to `tf.linalg.eigh`)."""
    return tf.linalg.eigh(a)
def inv(a):
    """Matrix inverse (delegates to `tf.linalg.inv`)."""
    return tf.linalg.inv(a)
def lu_factor(a):
    """LU factorization returning the packed LU matrix and pivot indices."""
    lu_matrix, permutation = tf.linalg.lu(a)
    # Invert the permutation so it matches the pivot convention expected
    # by callers.
    return lu_matrix, tf.math.invert_permutation(permutation)
def norm(x, ord=None, axis=None, keepdims=False):
    """Vector or matrix norm of `x`, following `numpy.linalg.norm` semantics.

    One axis selects a vector norm (2-norm, inf-norms, 0-"norm", or general
    p-norm); two axes select a matrix norm (Frobenius, induced 1/inf norms,
    nuclear, or spectral via SVD).

    Args:
        x: Input tensor.
        ord: Norm order (None, int, float inf/-inf, "fro", "nuc", ...).
        axis: None (all axes), an int, or a 2-tuple of ints.
        keepdims: If True, keep reduced axes with size 1.

    Returns:
        The norm as a tensor, reduced over `axis`.

    Raises:
        ValueError: For out-of-range axes or unsupported `ord`/`axis` combos.
    """
    from keras.src.backend.tensorflow.numpy import moveaxis

    x = convert_to_tensor(x)
    x_shape = x.shape
    ndim = x_shape.rank
    if axis is None:
        axis = tuple(range(ndim))
    elif isinstance(axis, int):
        axis = (axis,)
    if any(a < -ndim or a >= ndim for a in axis):
        raise ValueError(
            "All `axis` values must be in the range [-ndim, ndim). "
            f"Received inputs with ndim={ndim}, while axis={axis}"
        )
    # Collapse a 1-tuple back to an int so the vector-norm path triggers.
    axis = axis[0] if len(axis) == 1 else axis
    num_axes = 1 if isinstance(axis, int) else len(axis)
    # Promote to a float dtype for the reductions below.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    # Ref: jax.numpy.linalg.norm
    if num_axes == 1:
        # Vector norms.
        if ord is None or ord == 2:
            return tf.sqrt(
                tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)
            )
        elif ord == float("inf"):
            return tf.math.reduce_max(
                tf.math.abs(x), axis=axis, keepdims=keepdims
            )
        elif ord == float("-inf"):
            return tf.math.reduce_min(
                tf.math.abs(x), axis=axis, keepdims=keepdims
            )
        elif ord == 0:
            # "0-norm": count of nonzero entries.
            return tf.math.reduce_sum(
                tf.cast(tf.not_equal(x, 0), dtype=x.dtype),
                axis=axis,
                keepdims=keepdims,
            )
        elif isinstance(ord, str):
            raise ValueError(
                f"Invalid `ord` argument for vector norm. Received: ord={ord}"
            )
        else:
            # General p-norm: (sum |x|^p)^(1/p).
            ord = convert_to_tensor(ord, dtype=x.dtype)
            out = tf.math.reduce_sum(
                tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims
            )
            return tf.pow(out, 1.0 / ord)
    elif num_axes == 2:
        # Matrix norms.
        row_axis, col_axis = axis[0], axis[1]
        row_axis = row_axis + ndim if row_axis < 0 else row_axis
        col_axis = col_axis + ndim if col_axis < 0 else col_axis
        if ord is None or ord == "fro":
            return tf.sqrt(
                tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)
            )
        elif ord == 1:
            # Max absolute column sum; adjust `col_axis` because the row
            # reduction below may have removed a preceding axis.
            if not keepdims and col_axis > row_axis:
                col_axis -= 1
            x = tf.math.reduce_max(
                tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
                axis=col_axis,
                keepdims=keepdims,
            )
        elif ord == -1:
            # Min absolute column sum.
            if not keepdims and col_axis > row_axis:
                col_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
                axis=col_axis,
                keepdims=keepdims,
            )
        elif ord == float("inf"):
            # Max absolute row sum.
            if not keepdims and row_axis > col_axis:
                row_axis -= 1
            x = tf.math.reduce_max(
                tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
                axis=row_axis,
                keepdims=keepdims,
            )
        elif ord == float("-inf"):
            # Min absolute row sum.
            if not keepdims and row_axis > col_axis:
                row_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
                axis=row_axis,
                keepdims=keepdims,
            )
        elif ord in ("nuc", 2, -2):
            # Singular-value based norms: move the matrix axes last, then
            # reduce over singular values (min/max/sum).
            x = moveaxis(x, axis, (-2, -1))
            if ord == -2:
                x = tf.math.reduce_min(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            elif ord == 2:
                x = tf.math.reduce_max(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            else:
                x = tf.math.reduce_sum(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            if keepdims:
                x = tf.expand_dims(x, axis[0])
                x = tf.expand_dims(x, axis[1])
        else:
            raise ValueError(
                f"Invalid `ord` argument for matrix norm. Received: ord={ord}"
            )
        return x
    else:
        raise ValueError(f"Invalid axis values. Received: axis={axis}")
def qr(x, mode="reduced"):
    """QR decomposition in `"reduced"` or `"complete"` mode."""
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    # tf.linalg.qr defaults to full_matrices=False, i.e. "reduced" mode.
    return tf.linalg.qr(x, full_matrices=(mode == "complete"))
def solve(a, b):
    """Solve the linear system `a @ x = b` for `x`."""
    # tensorflow.linalg.solve only supports same rank inputs, so lift a
    # rank-(n-1) `b` to a column matrix and squeeze the result back.
    if b.shape.ndims == a.shape.ndims - 1:
        solution = tf.linalg.solve(a, tf.expand_dims(b, axis=-1))
        return tf.squeeze(solution, axis=-1)
    return tf.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
    """Solve `a @ x = b` where `a` is triangular (`lower` selects which)."""
    # Lift a rank-(n-1) `b` to a column matrix (same-rank requirement),
    # then squeeze the solution back.
    if b.shape.ndims == a.shape.ndims - 1:
        solution = tf.linalg.triangular_solve(
            a, tf.expand_dims(b, axis=-1), lower=lower
        )
        return tf.squeeze(solution, axis=-1)
    return tf.linalg.triangular_solve(a, b, lower=lower)
def svd(x, full_matrices=True, compute_uv=True):
    """Singular value decomposition returning `(u, s, vh)` NumPy-style."""
    if compute_uv is False:
        return tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=False)
    # tf returns (s, u, v); adjoint v to match the (u, s, vh) convention.
    singular_values, left, right = tf.linalg.svd(
        x, full_matrices=full_matrices, compute_uv=compute_uv
    )
    return left, singular_values, tf.linalg.adjoint(right)
def lstsq(a, b, rcond=None):
    """Least-squares solution to `a @ x = b` via SVD with `rcond` cutoff.

    Args:
        a: 2D coefficient matrix of shape (m, n).
        b: Right-hand side of shape (m,) or (m, k).
        rcond: Relative cutoff for small singular values; defaults to
            `eps * max(n, m)`. Negative values are replaced by `eps`.

    Returns:
        The solution tensor of shape (n,) or (n, k), matching `b`'s rank.

    Raises:
        ValueError: If leading dimensions of `a` and `b` differ.
        TypeError: If `a` is not 2D or `b` is not 1D/2D.
    """
    a = convert_to_tensor(a)
    b = convert_to_tensor(b)
    if a.shape[0] != b.shape[0]:
        raise ValueError("Leading dimensions of input arrays must match")
    b_orig_ndim = b.ndim
    # Work with `b` as a matrix; restore the original rank at the end.
    if b_orig_ndim == 1:
        b = b[:, None]
    if a.ndim != 2:
        raise TypeError(
            f"{a.ndim}-dimensional array given. Array must be two-dimensional"
        )
    if b.ndim != 2:
        raise TypeError(
            f"{b.ndim}-dimensional array given. "
            "Array must be one or two-dimensional"
        )
    m, n = a.shape
    dtype = a.dtype
    eps = tf.experimental.numpy.finfo(dtype).eps
    if a.shape == ():
        s = tf.zeros(0, dtype=a.dtype)
        x = tf.zeros((n, *b.shape[1:]), dtype=a.dtype)
    else:
        if rcond is None:
            rcond = eps * max(n, m)
        else:
            rcond = tf.where(rcond < 0, eps, rcond)
        u, s, vt = svd(a, full_matrices=False)
        # Zero out (pseudo-)inverse contributions of singular values below
        # the rcond-scaled cutoff; `safe_s` avoids division by tiny values.
        mask = s >= tf.convert_to_tensor(rcond, dtype=s.dtype) * s[0]
        safe_s = tf.cast(tf.where(mask, s, 1), dtype=a.dtype)
        s_inv = tf.where(mask, 1 / safe_s, 0)[:, tf.newaxis]
        u_t_b = tf.matmul(tf.transpose(tf.math.conj(u)), b)
        x = tf.matmul(tf.transpose(tf.math.conj(vt)), s_inv * u_t_b)
    if b_orig_ndim == 1:
        x = tf.reshape(x, [-1])
    return x
def jvp(fun, primals, tangents, has_aux=False):
    """Jacobian-vector product of `fun` at `primals` along `tangents`.

    Uses TF forward-mode autodiff (`tf.autodiff.ForwardAccumulator`).

    Args:
        fun: Callable taking `*primals`.
        primals: Nested structure of input tensors.
        tangents: Nested structure matching `primals` (cast to their dtypes).
        has_aux: If True, `fun` returns `(outputs, aux)`.

    Returns:
        `(primals_out, tangents_out)`, plus `aux` when `has_aux=True`.
    """
    primal_flat = tf.nest.flatten(primals)
    tangent_flat = tf.nest.flatten(tangents)
    tangent_flat = [
        tf.cast(t, p.dtype) for t, p in zip(tangent_flat, primal_flat)
    ]
    # `fun` must run inside the accumulator context so forward-mode
    # derivatives are recorded; `acc.jvp` may be queried after exit.
    with tf.autodiff.ForwardAccumulator(primal_flat, tangent_flat) as acc:
        if has_aux:
            primals_out, aux = fun(*primals)
        else:
            primals_out = fun(*primals)
    primals_out_flat = tf.nest.flatten(primals_out)
    tangents_out_flat = [acc.jvp(po) for po in primals_out_flat]
    tangents_out = tf.nest.pack_sequence_as(primals_out, tangents_out_flat)
    if has_aux:
        return primals_out, tangents_out, aux
    return primals_out, tangents_out
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/tensorboard.py | keras/src/backend/tensorflow/tensorboard.py | from keras.src.utils.module_utils import tensorflow as tf
def start_trace(logdir):
    """Start a TF profiler session writing to `logdir`."""
    tf.profiler.experimental.start(logdir=logdir)
def stop_trace(save):
    """Stop the active TF profiler session; `save` controls result export."""
    tf.profiler.experimental.stop(save=save)
def start_batch_trace(batch):
    """Open a profiler `Trace` context for step `batch` and return it.

    The context is entered manually (rather than via `with`) so it can span
    an entire batch and be closed later by `stop_batch_trace`.
    """
    batch_trace_context = tf.profiler.experimental.Trace(
        "Profiled batch", step_num=batch
    )
    batch_trace_context.__enter__()
    return batch_trace_context
def stop_batch_trace(batch_trace_context):
    """Close a trace context previously returned by `start_batch_trace`."""
    batch_trace_context.__exit__(None, None, None)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.