repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/vgg19/__init__.py | keras/api/applications/vgg19/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.vgg19 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg19 import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/efficientnet/__init__.py | keras/api/applications/efficientnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.efficientnet import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/inception_v3/__init__.py | keras/api/applications/inception_v3/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.inception_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_v3 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/activations/__init__.py | keras/api/activations/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activations import celu as celu
from keras.src.activations.activations import elu as elu
from keras.src.activations.activations import exponential as exponential
from keras.src.activations.activations import gelu as gelu
from keras.src.activations.activations import glu as glu
from keras.src.activations.activations import hard_shrink as hard_shrink
from keras.src.activations.activations import hard_sigmoid as hard_sigmoid
from keras.src.activations.activations import hard_silu as hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh as hard_tanh
from keras.src.activations.activations import leaky_relu as leaky_relu
from keras.src.activations.activations import linear as linear
from keras.src.activations.activations import log_sigmoid as log_sigmoid
from keras.src.activations.activations import log_softmax as log_softmax
from keras.src.activations.activations import mish as mish
from keras.src.activations.activations import relu as relu
from keras.src.activations.activations import relu6 as relu6
from keras.src.activations.activations import selu as selu
from keras.src.activations.activations import sigmoid as sigmoid
from keras.src.activations.activations import silu as silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink as soft_shrink
from keras.src.activations.activations import softmax as softmax
from keras.src.activations.activations import softplus as softplus
from keras.src.activations.activations import softsign as softsign
from keras.src.activations.activations import sparse_plus as sparse_plus
from keras.src.activations.activations import sparse_sigmoid as sparse_sigmoid
from keras.src.activations.activations import sparsemax as sparsemax
from keras.src.activations.activations import squareplus as squareplus
from keras.src.activations.activations import tanh as tanh
from keras.src.activations.activations import tanh_shrink as tanh_shrink
from keras.src.activations.activations import threshold as threshold
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/utils/__init__.py | keras/api/utils/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.layers.preprocessing.feature_space import (
FeatureSpace as FeatureSpace,
)
from keras.src.ops.operation_utils import get_source_inputs as get_source_inputs
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight as pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight as unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as PyDataset,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import (
audio_dataset_from_directory as audio_dataset_from_directory,
)
from keras.src.utils.config import Config as Config
from keras.src.utils.dataset_utils import split_dataset as split_dataset
from keras.src.utils.file_utils import get_file as get_file
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.model_visualization import model_to_dot as model_to_dot
from keras.src.utils.model_visualization import plot_model as plot_model
from keras.src.utils.numerical_utils import normalize as normalize
from keras.src.utils.numerical_utils import to_categorical as to_categorical
from keras.src.utils.progbar import Progbar as Progbar
from keras.src.utils.rng_utils import set_random_seed as set_random_seed
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
from keras.utils import bounding_boxes as bounding_boxes
from keras.utils import legacy as legacy
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/utils/bounding_boxes/__init__.py | keras/api/utils/bounding_boxes/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform as affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size as clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format as convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop as crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes as decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas as encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad as pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou as compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou as compute_iou,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/utils/legacy/__init__.py | keras/api/utils/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/callbacks/__init__.py | keras/api/callbacks/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.callbacks.backup_and_restore import (
BackupAndRestore as BackupAndRestore,
)
from keras.src.callbacks.callback import Callback as Callback
from keras.src.callbacks.callback_list import CallbackList as CallbackList
from keras.src.callbacks.csv_logger import CSVLogger as CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping as EarlyStopping
from keras.src.callbacks.history import History as History
from keras.src.callbacks.lambda_callback import LambdaCallback as LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import (
LearningRateScheduler as LearningRateScheduler,
)
from keras.src.callbacks.model_checkpoint import (
ModelCheckpoint as ModelCheckpoint,
)
from keras.src.callbacks.orbax_checkpoint import (
OrbaxCheckpoint as OrbaxCheckpoint,
)
from keras.src.callbacks.progbar_logger import ProgbarLogger as ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import (
ReduceLROnPlateau as ReduceLROnPlateau,
)
from keras.src.callbacks.remote_monitor import RemoteMonitor as RemoteMonitor
from keras.src.callbacks.swap_ema_weights import (
SwapEMAWeights as SwapEMAWeights,
)
from keras.src.callbacks.tensorboard import TensorBoard as TensorBoard
from keras.src.callbacks.terminate_on_nan import (
TerminateOnNaN as TerminateOnNaN,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/backend/__init__.py | keras/api/backend/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.dtypes import result_type as result_type
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import is_float_dtype as is_float_dtype
from keras.src.backend.common.variables import is_int_dtype as is_int_dtype
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.backend.config import backend as backend
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.utils.naming import get_uid as get_uid
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/constraints/__init__.py | keras/api/constraints/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize as deserialize
from keras.src.constraints import get as get
from keras.src.constraints import serialize as serialize
from keras.src.constraints.constraints import Constraint as Constraint
from keras.src.constraints.constraints import MaxNorm as MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg as NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm as UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/wrappers/__init__.py | keras/api/wrappers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import (
SKLearnClassifier as SKLearnClassifier,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnRegressor as SKLearnRegressor,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnTransformer as SKLearnTransformer,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/distribution/__init__.py | keras/api/distribution/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel as DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh
from keras.src.distribution.distribution_lib import LayoutMap as LayoutMap
from keras.src.distribution.distribution_lib import (
ModelParallel as ModelParallel,
)
from keras.src.distribution.distribution_lib import TensorLayout as TensorLayout
from keras.src.distribution.distribution_lib import (
distribute_tensor as distribute_tensor,
)
from keras.src.distribution.distribution_lib import distribution as distribution
from keras.src.distribution.distribution_lib import (
get_device_count as get_device_count,
)
from keras.src.distribution.distribution_lib import initialize as initialize
from keras.src.distribution.distribution_lib import list_devices as list_devices
from keras.src.distribution.distribution_lib import (
set_distribution as set_distribution,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/mixed_precision/__init__.py | keras/api/mixed_precision/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/layers/__init__.py | keras/api/layers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.tfsm_layer import TFSMLayer as TFSMLayer
from keras.src.layers import deserialize as deserialize
from keras.src.layers import serialize as serialize
from keras.src.layers.activations.activation import Activation as Activation
from keras.src.layers.activations.elu import ELU as ELU
from keras.src.layers.activations.leaky_relu import LeakyReLU as LeakyReLU
from keras.src.layers.activations.prelu import PReLU as PReLU
from keras.src.layers.activations.relu import ReLU as ReLU
from keras.src.layers.activations.softmax import Softmax as Softmax
from keras.src.layers.attention.additive_attention import (
AdditiveAttention as AdditiveAttention,
)
from keras.src.layers.attention.attention import Attention as Attention
from keras.src.layers.attention.grouped_query_attention import (
GroupedQueryAttention as GroupQueryAttention,
)
from keras.src.layers.attention.multi_head_attention import (
MultiHeadAttention as MultiHeadAttention,
)
from keras.src.layers.convolutional.conv1d import Conv1D as Conv1D
from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D
from keras.src.layers.convolutional.conv1d_transpose import (
Conv1DTranspose as Conv1DTranspose,
)
from keras.src.layers.convolutional.conv1d_transpose import (
Conv1DTranspose as Convolution1DTranspose,
)
from keras.src.layers.convolutional.conv2d import Conv2D as Conv2D
from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D
from keras.src.layers.convolutional.conv2d_transpose import (
Conv2DTranspose as Conv2DTranspose,
)
from keras.src.layers.convolutional.conv2d_transpose import (
Conv2DTranspose as Convolution2DTranspose,
)
from keras.src.layers.convolutional.conv3d import Conv3D as Conv3D
from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D
from keras.src.layers.convolutional.conv3d_transpose import (
Conv3DTranspose as Conv3DTranspose,
)
from keras.src.layers.convolutional.conv3d_transpose import (
Conv3DTranspose as Convolution3DTranspose,
)
from keras.src.layers.convolutional.depthwise_conv1d import (
DepthwiseConv1D as DepthwiseConv1D,
)
from keras.src.layers.convolutional.depthwise_conv2d import (
DepthwiseConv2D as DepthwiseConv2D,
)
from keras.src.layers.convolutional.separable_conv1d import (
SeparableConv1D as SeparableConv1D,
)
from keras.src.layers.convolutional.separable_conv1d import (
SeparableConv1D as SeparableConvolution1D,
)
from keras.src.layers.convolutional.separable_conv2d import (
SeparableConv2D as SeparableConv2D,
)
from keras.src.layers.convolutional.separable_conv2d import (
SeparableConv2D as SeparableConvolution2D,
)
from keras.src.layers.core.dense import Dense as Dense
from keras.src.layers.core.einsum_dense import EinsumDense as EinsumDense
from keras.src.layers.core.embedding import Embedding as Embedding
from keras.src.layers.core.identity import Identity as Identity
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.core.input_layer import InputLayer as InputLayer
from keras.src.layers.core.lambda_layer import Lambda as Lambda
from keras.src.layers.core.masking import Masking as Masking
from keras.src.layers.core.reversible_embedding import (
ReversibleEmbedding as ReversibleEmbedding,
)
from keras.src.layers.core.wrapper import Wrapper as Wrapper
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.layers.merging.add import Add as Add
from keras.src.layers.merging.add import add as add
from keras.src.layers.merging.average import Average as Average
from keras.src.layers.merging.average import average as average
from keras.src.layers.merging.concatenate import Concatenate as Concatenate
from keras.src.layers.merging.concatenate import concatenate as concatenate
from keras.src.layers.merging.dot import Dot as Dot
from keras.src.layers.merging.dot import dot as dot
from keras.src.layers.merging.maximum import Maximum as Maximum
from keras.src.layers.merging.maximum import maximum as maximum
from keras.src.layers.merging.minimum import Minimum as Minimum
from keras.src.layers.merging.minimum import minimum as minimum
from keras.src.layers.merging.multiply import Multiply as Multiply
from keras.src.layers.merging.multiply import multiply as multiply
from keras.src.layers.merging.subtract import Subtract as Subtract
from keras.src.layers.merging.subtract import subtract as subtract
from keras.src.layers.normalization.batch_normalization import (
BatchNormalization as BatchNormalization,
)
from keras.src.layers.normalization.group_normalization import (
GroupNormalization as GroupNormalization,
)
from keras.src.layers.normalization.layer_normalization import (
LayerNormalization as LayerNormalization,
)
from keras.src.layers.normalization.rms_normalization import (
RMSNormalization as RMSNormalization,
)
from keras.src.layers.normalization.spectral_normalization import (
SpectralNormalization as SpectralNormalization,
)
from keras.src.layers.normalization.unit_normalization import (
UnitNormalization as UnitNormalization,
)
from keras.src.layers.pooling.adaptive_average_pooling1d import (
AdaptiveAveragePooling1D as AdaptiveAveragePooling1D,
)
from keras.src.layers.pooling.adaptive_average_pooling2d import (
AdaptiveAveragePooling2D as AdaptiveAveragePooling2D,
)
from keras.src.layers.pooling.adaptive_average_pooling3d import (
AdaptiveAveragePooling3D as AdaptiveAveragePooling3D,
)
from keras.src.layers.pooling.adaptive_max_pooling1d import (
AdaptiveMaxPooling1D as AdaptiveMaxPooling1D,
)
from keras.src.layers.pooling.adaptive_max_pooling2d import (
AdaptiveMaxPooling2D as AdaptiveMaxPooling2D,
)
from keras.src.layers.pooling.adaptive_max_pooling3d import (
AdaptiveMaxPooling3D as AdaptiveMaxPooling3D,
)
from keras.src.layers.pooling.average_pooling1d import (
AveragePooling1D as AveragePooling1D,
)
from keras.src.layers.pooling.average_pooling1d import (
AveragePooling1D as AvgPool1D,
)
from keras.src.layers.pooling.average_pooling2d import (
AveragePooling2D as AveragePooling2D,
)
from keras.src.layers.pooling.average_pooling2d import (
AveragePooling2D as AvgPool2D,
)
from keras.src.layers.pooling.average_pooling3d import (
AveragePooling3D as AveragePooling3D,
)
from keras.src.layers.pooling.average_pooling3d import (
AveragePooling3D as AvgPool3D,
)
from keras.src.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D as GlobalAveragePooling1D,
)
from keras.src.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D as GlobalAvgPool1D,
)
from keras.src.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D as GlobalAveragePooling2D,
)
from keras.src.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D as GlobalAvgPool2D,
)
from keras.src.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D as GlobalAveragePooling3D,
)
from keras.src.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D as GlobalAvgPool3D,
)
from keras.src.layers.pooling.global_max_pooling1d import (
GlobalMaxPooling1D as GlobalMaxPool1D,
)
from keras.src.layers.pooling.global_max_pooling1d import (
GlobalMaxPooling1D as GlobalMaxPooling1D,
)
from keras.src.layers.pooling.global_max_pooling2d import (
GlobalMaxPooling2D as GlobalMaxPool2D,
)
from keras.src.layers.pooling.global_max_pooling2d import (
GlobalMaxPooling2D as GlobalMaxPooling2D,
)
from keras.src.layers.pooling.global_max_pooling3d import (
GlobalMaxPooling3D as GlobalMaxPool3D,
)
from keras.src.layers.pooling.global_max_pooling3d import (
GlobalMaxPooling3D as GlobalMaxPooling3D,
)
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPooling1D
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPooling2D
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPooling3D
from keras.src.layers.preprocessing.category_encoding import (
CategoryEncoding as CategoryEncoding,
)
from keras.src.layers.preprocessing.discretization import (
Discretization as Discretization,
)
from keras.src.layers.preprocessing.hashed_crossing import (
HashedCrossing as HashedCrossing,
)
from keras.src.layers.preprocessing.hashing import Hashing as Hashing
from keras.src.layers.preprocessing.image_preprocessing.aug_mix import (
AugMix as AugMix,
)
from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import (
AutoContrast as AutoContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.center_crop import (
CenterCrop as CenterCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.cut_mix import (
CutMix as CutMix,
)
from keras.src.layers.preprocessing.image_preprocessing.equalization import (
Equalization as Equalization,
)
from keras.src.layers.preprocessing.image_preprocessing.max_num_bounding_box import (
MaxNumBoundingBoxes as MaxNumBoundingBoxes,
)
from keras.src.layers.preprocessing.image_preprocessing.mix_up import (
MixUp as MixUp,
)
from keras.src.layers.preprocessing.image_preprocessing.rand_augment import (
RandAugment as RandAugment,
)
from keras.src.layers.preprocessing.image_preprocessing.random_brightness import (
RandomBrightness as RandomBrightness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_degeneration import (
RandomColorDegeneration as RandomColorDegeneration,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_jitter import (
RandomColorJitter as RandomColorJitter,
)
from keras.src.layers.preprocessing.image_preprocessing.random_contrast import (
RandomContrast as RandomContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.random_crop import (
RandomCrop as RandomCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.random_elastic_transform import (
RandomElasticTransform as RandomElasticTransform,
)
from keras.src.layers.preprocessing.image_preprocessing.random_erasing import (
RandomErasing as RandomErasing,
)
from keras.src.layers.preprocessing.image_preprocessing.random_flip import (
RandomFlip as RandomFlip,
)
from keras.src.layers.preprocessing.image_preprocessing.random_gaussian_blur import (
RandomGaussianBlur as RandomGaussianBlur,
)
from keras.src.layers.preprocessing.image_preprocessing.random_grayscale import (
RandomGrayscale as RandomGrayscale,
)
from keras.src.layers.preprocessing.image_preprocessing.random_hue import (
RandomHue as RandomHue,
)
from keras.src.layers.preprocessing.image_preprocessing.random_invert import (
RandomInvert as RandomInvert,
)
from keras.src.layers.preprocessing.image_preprocessing.random_perspective import (
RandomPerspective as RandomPerspective,
)
from keras.src.layers.preprocessing.image_preprocessing.random_posterization import (
RandomPosterization as RandomPosterization,
)
from keras.src.layers.preprocessing.image_preprocessing.random_rotation import (
RandomRotation as RandomRotation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_saturation import (
RandomSaturation as RandomSaturation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_sharpness import (
RandomSharpness as RandomSharpness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_shear import (
RandomShear as RandomShear,
)
from keras.src.layers.preprocessing.image_preprocessing.random_translation import (
RandomTranslation as RandomTranslation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_zoom import (
RandomZoom as RandomZoom,
)
from keras.src.layers.preprocessing.image_preprocessing.resizing import (
Resizing as Resizing,
)
from keras.src.layers.preprocessing.image_preprocessing.solarization import (
Solarization as Solarization,
)
from keras.src.layers.preprocessing.integer_lookup import (
IntegerLookup as IntegerLookup,
)
from keras.src.layers.preprocessing.mel_spectrogram import (
MelSpectrogram as MelSpectrogram,
)
from keras.src.layers.preprocessing.normalization import (
Normalization as Normalization,
)
from keras.src.layers.preprocessing.pipeline import Pipeline as Pipeline
from keras.src.layers.preprocessing.rescaling import Rescaling as Rescaling
from keras.src.layers.preprocessing.stft_spectrogram import (
STFTSpectrogram as STFTSpectrogram,
)
from keras.src.layers.preprocessing.string_lookup import (
StringLookup as StringLookup,
)
from keras.src.layers.preprocessing.text_vectorization import (
TextVectorization as TextVectorization,
)
from keras.src.layers.regularization.activity_regularization import (
ActivityRegularization as ActivityRegularization,
)
from keras.src.layers.regularization.alpha_dropout import (
AlphaDropout as AlphaDropout,
)
from keras.src.layers.regularization.dropout import Dropout as Dropout
from keras.src.layers.regularization.gaussian_dropout import (
GaussianDropout as GaussianDropout,
)
from keras.src.layers.regularization.gaussian_noise import (
GaussianNoise as GaussianNoise,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout1D as SpatialDropout1D,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout2D as SpatialDropout2D,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout3D as SpatialDropout3D,
)
from keras.src.layers.reshaping.cropping1d import Cropping1D as Cropping1D
from keras.src.layers.reshaping.cropping2d import Cropping2D as Cropping2D
from keras.src.layers.reshaping.cropping3d import Cropping3D as Cropping3D
from keras.src.layers.reshaping.flatten import Flatten as Flatten
from keras.src.layers.reshaping.permute import Permute as Permute
from keras.src.layers.reshaping.repeat_vector import (
RepeatVector as RepeatVector,
)
from keras.src.layers.reshaping.reshape import Reshape as Reshape
from keras.src.layers.reshaping.up_sampling1d import (
UpSampling1D as UpSampling1D,
)
from keras.src.layers.reshaping.up_sampling2d import (
UpSampling2D as UpSampling2D,
)
from keras.src.layers.reshaping.up_sampling3d import (
UpSampling3D as UpSampling3D,
)
from keras.src.layers.reshaping.zero_padding1d import (
ZeroPadding1D as ZeroPadding1D,
)
from keras.src.layers.reshaping.zero_padding2d import (
ZeroPadding2D as ZeroPadding2D,
)
from keras.src.layers.reshaping.zero_padding3d import (
ZeroPadding3D as ZeroPadding3D,
)
from keras.src.layers.rnn.bidirectional import Bidirectional as Bidirectional
from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D as ConvLSTM1D
from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D as ConvLSTM2D
from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D as ConvLSTM3D
from keras.src.layers.rnn.gru import GRU as GRU
from keras.src.layers.rnn.gru import GRUCell as GRUCell
from keras.src.layers.rnn.lstm import LSTM as LSTM
from keras.src.layers.rnn.lstm import LSTMCell as LSTMCell
from keras.src.layers.rnn.rnn import RNN as RNN
from keras.src.layers.rnn.simple_rnn import SimpleRNN as SimpleRNN
from keras.src.layers.rnn.simple_rnn import SimpleRNNCell as SimpleRNNCell
from keras.src.layers.rnn.stacked_rnn_cells import (
StackedRNNCells as StackedRNNCells,
)
from keras.src.layers.rnn.time_distributed import (
TimeDistributed as TimeDistributed,
)
from keras.src.utils.jax_layer import FlaxLayer as FlaxLayer
from keras.src.utils.jax_layer import JaxLayer as JaxLayer
from keras.src.utils.torch_utils import TorchModuleWrapper as TorchModuleWrapper
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/legacy/__init__.py | keras/api/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.legacy import saving as saving
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/legacy/saving/__init__.py | keras/api/legacy/saving/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/__init__.py | keras/api/_tf_keras/__init__.py | from keras._tf_keras import keras
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/__init__.py | keras/api/_tf_keras/keras/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras import activations as activations
from keras import applications as applications
from keras import callbacks as callbacks
from keras import config as config
from keras import constraints as constraints
from keras import datasets as datasets
from keras import distillation as distillation
from keras import distribution as distribution
from keras import dtype_policies as dtype_policies
from keras import export as export
from keras import initializers as initializers
from keras import legacy as legacy
from keras import mixed_precision as mixed_precision
from keras import models as models
from keras import ops as ops
from keras import optimizers as optimizers
from keras import quantizers as quantizers
from keras import random as random
from keras import regularizers as regularizers
from keras import tree as tree
from keras import utils as utils
from keras import visualization as visualization
from keras import wrappers as wrappers
from keras._tf_keras.keras import backend as backend
from keras._tf_keras.keras import layers as layers
from keras._tf_keras.keras import losses as losses
from keras._tf_keras.keras import metrics as metrics
from keras._tf_keras.keras import preprocessing as preprocessing
from keras.src.backend import Variable as Variable
from keras.src.backend import device as device
from keras.src.backend import name_scope as name_scope
from keras.src.backend.common.keras_tensor import KerasTensor as KerasTensor
from keras.src.backend.common.remat import RematScope as RematScope
from keras.src.backend.common.remat import remat as remat
from keras.src.backend.common.stateless_scope import (
StatelessScope as StatelessScope,
)
from keras.src.backend.common.symbolic_scope import (
SymbolicScope as SymbolicScope,
)
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.losses.loss import Loss as Loss
from keras.src.metrics.metric import Metric as Metric
from keras.src.models.model import Model as Model
from keras.src.models.sequential import Sequential as Sequential
from keras.src.ops.function import Function as Function
from keras.src.ops.operation import Operation as Operation
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.regularizers.regularizers import Regularizer as Regularizer
from keras.src.version import __version__ as __version__
from keras.src.version import version as version
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/initializers/__init__.py | keras/api/_tf_keras/keras/initializers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize as deserialize
from keras.src.initializers import get as get
from keras.src.initializers import serialize as serialize
from keras.src.initializers.constant_initializers import STFT as STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant as Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity as Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones as Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros as Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.initializers.random_initializers import (
GlorotNormal as GlorotNormal,
)
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as GlorotUniform,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal as HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform as HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import (
LecunNormal as LecunNormal,
)
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import (
LecunUniform as LecunUniform,
)
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal as Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import (
RandomNormal as RandomNormal,
)
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import (
RandomUniform as RandomUniform,
)
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as TruncatedNormal,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as VarianceScaling,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/distillation/__init__.py | keras/api/_tf_keras/keras/distillation/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distillation.distillation_loss import (
DistillationLoss as DistillationLoss,
)
from keras.src.distillation.distillation_loss import (
FeatureDistillation as FeatureDistillation,
)
from keras.src.distillation.distillation_loss import (
LogitsDistillation as LogitsDistillation,
)
from keras.src.distillation.distiller import Distiller as Distiller
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/regularizers/__init__.py | keras/api/_tf_keras/keras/regularizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.regularizers import deserialize as deserialize
from keras.src.regularizers import get as get
from keras.src.regularizers import serialize as serialize
from keras.src.regularizers.regularizers import L1 as L1
from keras.src.regularizers.regularizers import L1 as l1
from keras.src.regularizers.regularizers import L1L2 as L1L2
from keras.src.regularizers.regularizers import L1L2 as l1_l2
from keras.src.regularizers.regularizers import L2 as L2
from keras.src.regularizers.regularizers import L2 as l2
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as OrthogonalRegularizer,
)
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as orthogonal_regularizer,
)
from keras.src.regularizers.regularizers import Regularizer as Regularizer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/saving/__init__.py | keras/api/_tf_keras/keras/saving/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import load_weights as load_weights
from keras.src.saving.saving_api import save_model as save_model
from keras.src.saving.saving_api import save_weights as save_weights
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/optimizers/__init__.py | keras/api/_tf_keras/keras/optimizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.optimizers import legacy as legacy
from keras.optimizers import schedules as schedules
from keras.src.optimizers import deserialize as deserialize
from keras.src.optimizers import get as get
from keras.src.optimizers import serialize as serialize
from keras.src.optimizers.adadelta import Adadelta as Adadelta
from keras.src.optimizers.adafactor import Adafactor as Adafactor
from keras.src.optimizers.adagrad import Adagrad as Adagrad
from keras.src.optimizers.adam import Adam as Adam
from keras.src.optimizers.adamax import Adamax as Adamax
from keras.src.optimizers.adamw import AdamW as AdamW
from keras.src.optimizers.ftrl import Ftrl as Ftrl
from keras.src.optimizers.lamb import Lamb as Lamb
from keras.src.optimizers.lion import Lion as Lion
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
from keras.src.optimizers.muon import Muon as Muon
from keras.src.optimizers.nadam import Nadam as Nadam
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.optimizers.rmsprop import RMSprop as RMSprop
from keras.src.optimizers.sgd import SGD as SGD
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/optimizers/schedules/__init__.py | keras/api/_tf_keras/keras/optimizers/schedules/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecay as CosineDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts as CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay as ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay as InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule as LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay as PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay as PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
deserialize as deserialize,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
serialize as serialize,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/optimizers/legacy/__init__.py | keras/api/_tf_keras/keras/optimizers/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers import LegacyOptimizerWarning as Adagrad
from keras.src.optimizers import LegacyOptimizerWarning as Adam
from keras.src.optimizers import LegacyOptimizerWarning as Ftrl
from keras.src.optimizers import LegacyOptimizerWarning as Optimizer
from keras.src.optimizers import LegacyOptimizerWarning as RMSprop
from keras.src.optimizers import LegacyOptimizerWarning as SGD
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/preprocessing/__init__.py | keras/api/_tf_keras/keras/preprocessing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image as image
from keras._tf_keras.keras.preprocessing import sequence as sequence
from keras._tf_keras.keras.preprocessing import text as text
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/preprocessing/image/__init__.py | keras/api/_tf_keras/keras/preprocessing/image/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.image import (
DirectoryIterator as DirectoryIterator,
)
from keras.src.legacy.preprocessing.image import (
ImageDataGenerator as ImageDataGenerator,
)
from keras.src.legacy.preprocessing.image import Iterator as Iterator
from keras.src.legacy.preprocessing.image import (
NumpyArrayIterator as NumpyArrayIterator,
)
from keras.src.legacy.preprocessing.image import (
apply_affine_transform as apply_affine_transform,
)
from keras.src.legacy.preprocessing.image import (
apply_brightness_shift as apply_brightness_shift,
)
from keras.src.legacy.preprocessing.image import (
apply_channel_shift as apply_channel_shift,
)
from keras.src.legacy.preprocessing.image import (
random_brightness as random_brightness,
)
from keras.src.legacy.preprocessing.image import (
random_channel_shift as random_channel_shift,
)
from keras.src.legacy.preprocessing.image import (
random_rotation as random_rotation,
)
from keras.src.legacy.preprocessing.image import random_shear as random_shear
from keras.src.legacy.preprocessing.image import random_shift as random_shift
from keras.src.legacy.preprocessing.image import random_zoom as random_zoom
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.image_utils import smart_resize as smart_resize
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py | keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import (
TimeseriesGenerator as TimeseriesGenerator,
)
from keras.src.legacy.preprocessing.sequence import (
make_sampling_table as make_sampling_table,
)
from keras.src.legacy.preprocessing.sequence import skipgrams as skipgrams
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/preprocessing/text/__init__.py | keras/api/_tf_keras/keras/preprocessing/text/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.text import Tokenizer as Tokenizer
from keras.src.legacy.preprocessing.text import hashing_trick as hashing_trick
from keras.src.legacy.preprocessing.text import one_hot as one_hot
from keras.src.legacy.preprocessing.text import (
text_to_word_sequence as text_to_word_sequence,
)
from keras.src.legacy.preprocessing.text import (
tokenizer_from_json as tokenizer_from_json,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/dtype_policies/__init__.py | keras/api/_tf_keras/keras/dtype_policies/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize as deserialize
from keras.src.dtype_policies import get as get
from keras.src.dtype_policies import serialize as serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
GPTQDTypePolicy as GPTQDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedDTypePolicy as QuantizedDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy,
)
from keras.src.dtype_policies.dtype_policy_map import (
DTypePolicyMap as DTypePolicyMap,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/losses/__init__.py | keras/api/_tf_keras/keras/losses/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction as Reduction
from keras.src.losses import deserialize as deserialize
from keras.src.losses import get as get
from keras.src.losses import serialize as serialize
from keras.src.losses.loss import Loss as Loss
from keras.src.losses.losses import CTC as CTC
from keras.src.losses.losses import BinaryCrossentropy as BinaryCrossentropy
from keras.src.losses.losses import (
BinaryFocalCrossentropy as BinaryFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalFocalCrossentropy as CategoricalFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalGeneralizedCrossEntropy as CategoricalGeneralizedCrossEntropy,
)
from keras.src.losses.losses import CategoricalHinge as CategoricalHinge
from keras.src.losses.losses import Circle as Circle
from keras.src.losses.losses import CosineSimilarity as CosineSimilarity
from keras.src.losses.losses import Dice as Dice
from keras.src.losses.losses import Hinge as Hinge
from keras.src.losses.losses import Huber as Huber
from keras.src.losses.losses import KLDivergence as KLDivergence
from keras.src.losses.losses import LogCosh as LogCosh
from keras.src.losses.losses import MeanAbsoluteError as MeanAbsoluteError
from keras.src.losses.losses import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.losses.losses import MeanSquaredError as MeanSquaredError
from keras.src.losses.losses import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.losses.losses import Poisson as Poisson
from keras.src.losses.losses import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.losses.losses import SquaredHinge as SquaredHinge
from keras.src.losses.losses import Tversky as Tversky
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_generalized_cross_entropy as categorical_generalized_cross_entropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import circle as circle
from keras.src.losses.losses import cosine_similarity as cosine_similarity
from keras.src.losses.losses import ctc as ctc
from keras.src.losses.losses import dice as dice
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.losses.losses import tversky as tversky
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/visualization/__init__.py | keras/api/_tf_keras/keras/visualization/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.visualization.draw_bounding_boxes import (
draw_bounding_boxes as draw_bounding_boxes,
)
from keras.src.visualization.draw_segmentation_masks import (
draw_segmentation_masks as draw_segmentation_masks,
)
from keras.src.visualization.plot_bounding_box_gallery import (
plot_bounding_box_gallery as plot_bounding_box_gallery,
)
from keras.src.visualization.plot_image_gallery import (
plot_image_gallery as plot_image_gallery,
)
from keras.src.visualization.plot_segmentation_mask_gallery import (
plot_segmentation_mask_gallery as plot_segmentation_mask_gallery,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/__init__.py | keras/api/_tf_keras/keras/datasets/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.datasets import boston_housing as boston_housing
from keras.datasets import california_housing as california_housing
from keras.datasets import cifar10 as cifar10
from keras.datasets import cifar100 as cifar100
from keras.datasets import fashion_mnist as fashion_mnist
from keras.datasets import imdb as imdb
from keras.datasets import mnist as mnist
from keras.datasets import reuters as reuters
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/reuters/__init__.py | keras/api/_tf_keras/keras/datasets/reuters/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names as get_label_names
from keras.src.datasets.reuters import get_word_index as get_word_index
from keras.src.datasets.reuters import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/cifar10/__init__.py | keras/api/_tf_keras/keras/datasets/cifar10/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar10 import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py | keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/cifar100/__init__.py | keras/api/_tf_keras/keras/datasets/cifar100/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py | keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.boston_housing import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/california_housing/__init__.py | keras/api/_tf_keras/keras/datasets/california_housing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/imdb/__init__.py | keras/api/_tf_keras/keras/datasets/imdb/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index as get_word_index
from keras.src.datasets.imdb import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/datasets/mnist/__init__.py | keras/api/_tf_keras/keras/datasets/mnist/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/metrics/__init__.py | keras/api/_tf_keras/keras/metrics/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.metrics import deserialize as deserialize
from keras.src.metrics import get as get
from keras.src.metrics import serialize as serialize
from keras.src.metrics.accuracy_metrics import Accuracy as Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy as BinaryAccuracy
from keras.src.metrics.accuracy_metrics import (
CategoricalAccuracy as CategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
SparseCategoricalAccuracy as SparseCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
SparseTopKCategoricalAccuracy as SparseTopKCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
TopKCategoricalAccuracy as TopKCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
binary_accuracy as binary_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
categorical_accuracy as categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
sparse_categorical_accuracy as sparse_categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
sparse_top_k_categorical_accuracy as sparse_top_k_categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
top_k_categorical_accuracy as top_k_categorical_accuracy,
)
from keras.src.metrics.confusion_metrics import AUC as AUC
from keras.src.metrics.confusion_metrics import FalseNegatives as FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives as FalsePositives
from keras.src.metrics.confusion_metrics import Precision as Precision
from keras.src.metrics.confusion_metrics import (
PrecisionAtRecall as PrecisionAtRecall,
)
from keras.src.metrics.confusion_metrics import Recall as Recall
from keras.src.metrics.confusion_metrics import (
RecallAtPrecision as RecallAtPrecision,
)
from keras.src.metrics.confusion_metrics import (
SensitivityAtSpecificity as SensitivityAtSpecificity,
)
from keras.src.metrics.confusion_metrics import (
SpecificityAtSensitivity as SpecificityAtSensitivity,
)
from keras.src.metrics.confusion_metrics import TrueNegatives as TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives as TruePositives
from keras.src.metrics.correlation_metrics import (
ConcordanceCorrelation as ConcordanceCorrelation,
)
from keras.src.metrics.correlation_metrics import (
PearsonCorrelation as PearsonCorrelation,
)
from keras.src.metrics.correlation_metrics import (
concordance_correlation as concordance_correlation,
)
from keras.src.metrics.correlation_metrics import (
pearson_correlation as pearson_correlation,
)
from keras.src.metrics.f_score_metrics import F1Score as F1Score
from keras.src.metrics.f_score_metrics import FBetaScore as FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge as CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge as Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge as SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU as BinaryIoU
from keras.src.metrics.iou_metrics import IoU as IoU
from keras.src.metrics.iou_metrics import MeanIoU as MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU as OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU as OneHotMeanIoU
from keras.src.metrics.metric import Metric as Metric
from keras.src.metrics.probabilistic_metrics import (
BinaryCrossentropy as BinaryCrossentropy,
)
from keras.src.metrics.probabilistic_metrics import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.metrics.probabilistic_metrics import KLDivergence as KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson as Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean as Mean
from keras.src.metrics.reduction_metrics import (
MeanMetricWrapper as MeanMetricWrapper,
)
from keras.src.metrics.reduction_metrics import Sum as Sum
from keras.src.metrics.regression_metrics import (
CosineSimilarity as CosineSimilarity,
)
from keras.src.metrics.regression_metrics import LogCoshError as LogCoshError
from keras.src.metrics.regression_metrics import (
MeanAbsoluteError as MeanAbsoluteError,
)
from keras.src.metrics.regression_metrics import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.metrics.regression_metrics import (
MeanSquaredError as MeanSquaredError,
)
from keras.src.metrics.regression_metrics import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.metrics.regression_metrics import R2Score as R2Score
from keras.src.metrics.regression_metrics import (
RootMeanSquaredError as RootMeanSquaredError,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/models/__init__.py | keras/api/_tf_keras/keras/models/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.models.cloning import clone_model as clone_model
from keras.src.models.model import Model as Model
from keras.src.models.model import model_from_json as model_from_json
from keras.src.models.sequential import Sequential as Sequential
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import save_model as save_model
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/__init__.py | keras/api/_tf_keras/keras/applications/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.applications import convnext as convnext
from keras.applications import densenet as densenet
from keras.applications import efficientnet as efficientnet
from keras.applications import efficientnet_v2 as efficientnet_v2
from keras.applications import imagenet_utils as imagenet_utils
from keras.applications import inception_resnet_v2 as inception_resnet_v2
from keras.applications import inception_v3 as inception_v3
from keras.applications import mobilenet as mobilenet
from keras.applications import mobilenet_v2 as mobilenet_v2
from keras.applications import mobilenet_v3 as mobilenet_v3
from keras.applications import nasnet as nasnet
from keras.applications import resnet as resnet
from keras.applications import resnet50 as resnet50
from keras.applications import resnet_v2 as resnet_v2
from keras.applications import vgg16 as vgg16
from keras.applications import vgg19 as vgg19
from keras.applications import xception as xception
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Large as MobileNetV3Large,
)
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Small as MobileNetV3Small,
)
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.xception import Xception as Xception
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py | keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.efficientnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.efficientnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py | keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_resnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/nasnet/__init__.py | keras/api/_tf_keras/keras/applications/nasnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.nasnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.nasnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/vgg16/__init__.py | keras/api/_tf_keras/keras/applications/vgg16/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg16 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg16 import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/convnext/__init__.py | keras/api/_tf_keras/keras/applications/convnext/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.convnext import (
decode_predictions as decode_predictions,
)
from keras.src.applications.convnext import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py | keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/xception/__init__.py | keras/api/_tf_keras/keras/applications/xception/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception as Xception
from keras.src.applications.xception import (
decode_predictions as decode_predictions,
)
from keras.src.applications.xception import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py | keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v3 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/mobilenet/__init__.py | keras/api/_tf_keras/keras/applications/mobilenet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/resnet/__init__.py | keras/api/_tf_keras/keras/applications/resnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py | keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import (
decode_predictions as decode_predictions,
)
from keras.src.applications.imagenet_utils import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py | keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/resnet50/__init__.py | keras/api/_tf_keras/keras/applications/resnet50/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/densenet/__init__.py | keras/api/_tf_keras/keras/applications/densenet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.densenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.densenet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/vgg19/__init__.py | keras/api/_tf_keras/keras/applications/vgg19/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.vgg19 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg19 import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/efficientnet/__init__.py | keras/api/_tf_keras/keras/applications/efficientnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.efficientnet import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/applications/inception_v3/__init__.py | keras/api/_tf_keras/keras/applications/inception_v3/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.inception_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_v3 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/activations/__init__.py | keras/api/_tf_keras/keras/activations/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activations import celu as celu
from keras.src.activations.activations import elu as elu
from keras.src.activations.activations import exponential as exponential
from keras.src.activations.activations import gelu as gelu
from keras.src.activations.activations import glu as glu
from keras.src.activations.activations import hard_shrink as hard_shrink
from keras.src.activations.activations import hard_sigmoid as hard_sigmoid
from keras.src.activations.activations import hard_silu as hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh as hard_tanh
from keras.src.activations.activations import leaky_relu as leaky_relu
from keras.src.activations.activations import linear as linear
from keras.src.activations.activations import log_sigmoid as log_sigmoid
from keras.src.activations.activations import log_softmax as log_softmax
from keras.src.activations.activations import mish as mish
from keras.src.activations.activations import relu as relu
from keras.src.activations.activations import relu6 as relu6
from keras.src.activations.activations import selu as selu
from keras.src.activations.activations import sigmoid as sigmoid
from keras.src.activations.activations import silu as silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink as soft_shrink
from keras.src.activations.activations import softmax as softmax
from keras.src.activations.activations import softplus as softplus
from keras.src.activations.activations import softsign as softsign
from keras.src.activations.activations import sparse_plus as sparse_plus
from keras.src.activations.activations import sparse_sigmoid as sparse_sigmoid
from keras.src.activations.activations import sparsemax as sparsemax
from keras.src.activations.activations import squareplus as squareplus
from keras.src.activations.activations import tanh as tanh
from keras.src.activations.activations import tanh_shrink as tanh_shrink
from keras.src.activations.activations import threshold as threshold
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/utils/__init__.py | keras/api/_tf_keras/keras/utils/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.layers.preprocessing.feature_space import (
FeatureSpace as FeatureSpace,
)
from keras.src.ops.operation_utils import get_source_inputs as get_source_inputs
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight as pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight as unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as PyDataset,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import (
audio_dataset_from_directory as audio_dataset_from_directory,
)
from keras.src.utils.config import Config as Config
from keras.src.utils.dataset_utils import split_dataset as split_dataset
from keras.src.utils.file_utils import get_file as get_file
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.model_visualization import model_to_dot as model_to_dot
from keras.src.utils.model_visualization import plot_model as plot_model
from keras.src.utils.numerical_utils import normalize as normalize
from keras.src.utils.numerical_utils import to_categorical as to_categorical
from keras.src.utils.progbar import Progbar as Progbar
from keras.src.utils.rng_utils import set_random_seed as set_random_seed
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
from keras.utils import bounding_boxes as bounding_boxes
from keras.utils import legacy as legacy
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/utils/bounding_boxes/__init__.py | keras/api/_tf_keras/keras/utils/bounding_boxes/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform as affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size as clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format as convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop as crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes as decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas as encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad as pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou as compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou as compute_iou,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/utils/legacy/__init__.py | keras/api/_tf_keras/keras/utils/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/callbacks/__init__.py | keras/api/_tf_keras/keras/callbacks/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.callbacks.backup_and_restore import (
BackupAndRestore as BackupAndRestore,
)
from keras.src.callbacks.callback import Callback as Callback
from keras.src.callbacks.callback_list import CallbackList as CallbackList
from keras.src.callbacks.csv_logger import CSVLogger as CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping as EarlyStopping
from keras.src.callbacks.history import History as History
from keras.src.callbacks.lambda_callback import LambdaCallback as LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import (
LearningRateScheduler as LearningRateScheduler,
)
from keras.src.callbacks.model_checkpoint import (
ModelCheckpoint as ModelCheckpoint,
)
from keras.src.callbacks.orbax_checkpoint import (
OrbaxCheckpoint as OrbaxCheckpoint,
)
from keras.src.callbacks.progbar_logger import ProgbarLogger as ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import (
ReduceLROnPlateau as ReduceLROnPlateau,
)
from keras.src.callbacks.remote_monitor import RemoteMonitor as RemoteMonitor
from keras.src.callbacks.swap_ema_weights import (
SwapEMAWeights as SwapEMAWeights,
)
from keras.src.callbacks.tensorboard import TensorBoard as TensorBoard
from keras.src.callbacks.terminate_on_nan import (
TerminateOnNaN as TerminateOnNaN,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/backend/__init__.py | keras/api/_tf_keras/keras/backend/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.dtypes import result_type as result_type
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import is_float_dtype as is_float_dtype
from keras.src.backend.common.variables import is_int_dtype as is_int_dtype
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.backend.config import backend as backend
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.legacy.backend import abs as abs
from keras.src.legacy.backend import all as all
from keras.src.legacy.backend import any as any
from keras.src.legacy.backend import arange as arange
from keras.src.legacy.backend import argmax as argmax
from keras.src.legacy.backend import argmin as argmin
from keras.src.legacy.backend import batch_dot as batch_dot
from keras.src.legacy.backend import batch_flatten as batch_flatten
from keras.src.legacy.backend import batch_get_value as batch_get_value
from keras.src.legacy.backend import batch_normalization as batch_normalization
from keras.src.legacy.backend import batch_set_value as batch_set_value
from keras.src.legacy.backend import bias_add as bias_add
from keras.src.legacy.backend import binary_crossentropy as binary_crossentropy
from keras.src.legacy.backend import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.legacy.backend import cast as cast
from keras.src.legacy.backend import cast_to_floatx as cast_to_floatx
from keras.src.legacy.backend import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.legacy.backend import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.legacy.backend import clip as clip
from keras.src.legacy.backend import concatenate as concatenate
from keras.src.legacy.backend import constant as constant
from keras.src.legacy.backend import conv1d as conv1d
from keras.src.legacy.backend import conv2d as conv2d
from keras.src.legacy.backend import conv2d_transpose as conv2d_transpose
from keras.src.legacy.backend import conv3d as conv3d
from keras.src.legacy.backend import cos as cos
from keras.src.legacy.backend import count_params as count_params
from keras.src.legacy.backend import ctc_batch_cost as ctc_batch_cost
from keras.src.legacy.backend import ctc_decode as ctc_decode
from keras.src.legacy.backend import (
ctc_label_dense_to_sparse as ctc_label_dense_to_sparse,
)
from keras.src.legacy.backend import cumprod as cumprod
from keras.src.legacy.backend import cumsum as cumsum
from keras.src.legacy.backend import depthwise_conv2d as depthwise_conv2d
from keras.src.legacy.backend import dot as dot
from keras.src.legacy.backend import dropout as dropout
from keras.src.legacy.backend import dtype as dtype
from keras.src.legacy.backend import elu as elu
from keras.src.legacy.backend import equal as equal
from keras.src.legacy.backend import eval as eval
from keras.src.legacy.backend import exp as exp
from keras.src.legacy.backend import expand_dims as expand_dims
from keras.src.legacy.backend import eye as eye
from keras.src.legacy.backend import flatten as flatten
from keras.src.legacy.backend import foldl as foldl
from keras.src.legacy.backend import foldr as foldr
from keras.src.legacy.backend import gather as gather
from keras.src.legacy.backend import get_value as get_value
from keras.src.legacy.backend import gradients as gradients
from keras.src.legacy.backend import greater as greater
from keras.src.legacy.backend import greater_equal as greater_equal
from keras.src.legacy.backend import hard_sigmoid as hard_sigmoid
from keras.src.legacy.backend import in_top_k as in_top_k
from keras.src.legacy.backend import int_shape as int_shape
from keras.src.legacy.backend import is_sparse as is_sparse
from keras.src.legacy.backend import l2_normalize as l2_normalize
from keras.src.legacy.backend import less as less
from keras.src.legacy.backend import less_equal as less_equal
from keras.src.legacy.backend import log as log
from keras.src.legacy.backend import map_fn as map_fn
from keras.src.legacy.backend import max as max
from keras.src.legacy.backend import maximum as maximum
from keras.src.legacy.backend import mean as mean
from keras.src.legacy.backend import min as min
from keras.src.legacy.backend import minimum as minimum
from keras.src.legacy.backend import (
moving_average_update as moving_average_update,
)
from keras.src.legacy.backend import name_scope as name_scope
from keras.src.legacy.backend import ndim as ndim
from keras.src.legacy.backend import not_equal as not_equal
from keras.src.legacy.backend import one_hot as one_hot
from keras.src.legacy.backend import ones as ones
from keras.src.legacy.backend import ones_like as ones_like
from keras.src.legacy.backend import permute_dimensions as permute_dimensions
from keras.src.legacy.backend import pool2d as pool2d
from keras.src.legacy.backend import pool3d as pool3d
from keras.src.legacy.backend import pow as pow
from keras.src.legacy.backend import prod as prod
from keras.src.legacy.backend import random_bernoulli as random_bernoulli
from keras.src.legacy.backend import random_normal as random_normal
from keras.src.legacy.backend import (
random_normal_variable as random_normal_variable,
)
from keras.src.legacy.backend import random_uniform as random_uniform
from keras.src.legacy.backend import (
random_uniform_variable as random_uniform_variable,
)
from keras.src.legacy.backend import relu as relu
from keras.src.legacy.backend import repeat as repeat
from keras.src.legacy.backend import repeat_elements as repeat_elements
from keras.src.legacy.backend import reshape as reshape
from keras.src.legacy.backend import resize_images as resize_images
from keras.src.legacy.backend import resize_volumes as resize_volumes
from keras.src.legacy.backend import reverse as reverse
from keras.src.legacy.backend import rnn as rnn
from keras.src.legacy.backend import round as round
from keras.src.legacy.backend import separable_conv2d as separable_conv2d
from keras.src.legacy.backend import set_value as set_value
from keras.src.legacy.backend import shape as shape
from keras.src.legacy.backend import sigmoid as sigmoid
from keras.src.legacy.backend import sign as sign
from keras.src.legacy.backend import sin as sin
from keras.src.legacy.backend import softmax as softmax
from keras.src.legacy.backend import softplus as softplus
from keras.src.legacy.backend import softsign as softsign
from keras.src.legacy.backend import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.legacy.backend import spatial_2d_padding as spatial_2d_padding
from keras.src.legacy.backend import spatial_3d_padding as spatial_3d_padding
from keras.src.legacy.backend import sqrt as sqrt
from keras.src.legacy.backend import square as square
from keras.src.legacy.backend import squeeze as squeeze
from keras.src.legacy.backend import stack as stack
from keras.src.legacy.backend import std as std
from keras.src.legacy.backend import stop_gradient as stop_gradient
from keras.src.legacy.backend import sum as sum
from keras.src.legacy.backend import switch as switch
from keras.src.legacy.backend import tanh as tanh
from keras.src.legacy.backend import temporal_padding as temporal_padding
from keras.src.legacy.backend import tile as tile
from keras.src.legacy.backend import to_dense as to_dense
from keras.src.legacy.backend import transpose as transpose
from keras.src.legacy.backend import truncated_normal as truncated_normal
from keras.src.legacy.backend import update as update
from keras.src.legacy.backend import update_add as update_add
from keras.src.legacy.backend import update_sub as update_sub
from keras.src.legacy.backend import var as var
from keras.src.legacy.backend import variable as variable
from keras.src.legacy.backend import zeros as zeros
from keras.src.legacy.backend import zeros_like as zeros_like
from keras.src.utils.naming import get_uid as get_uid
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/constraints/__init__.py | keras/api/_tf_keras/keras/constraints/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize as deserialize
from keras.src.constraints import get as get
from keras.src.constraints import serialize as serialize
from keras.src.constraints.constraints import Constraint as Constraint
from keras.src.constraints.constraints import MaxNorm as MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg as NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm as UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/wrappers/__init__.py | keras/api/_tf_keras/keras/wrappers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import (
SKLearnClassifier as SKLearnClassifier,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnRegressor as SKLearnRegressor,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnTransformer as SKLearnTransformer,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/distribution/__init__.py | keras/api/_tf_keras/keras/distribution/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel as DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh
from keras.src.distribution.distribution_lib import LayoutMap as LayoutMap
from keras.src.distribution.distribution_lib import (
ModelParallel as ModelParallel,
)
from keras.src.distribution.distribution_lib import TensorLayout as TensorLayout
from keras.src.distribution.distribution_lib import (
distribute_tensor as distribute_tensor,
)
from keras.src.distribution.distribution_lib import distribution as distribution
from keras.src.distribution.distribution_lib import (
get_device_count as get_device_count,
)
from keras.src.distribution.distribution_lib import initialize as initialize
from keras.src.distribution.distribution_lib import list_devices as list_devices
from keras.src.distribution.distribution_lib import (
set_distribution as set_distribution,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/mixed_precision/__init__.py | keras/api/_tf_keras/keras/mixed_precision/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/layers/__init__.py | keras/api/_tf_keras/keras/layers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.tfsm_layer import TFSMLayer as TFSMLayer
from keras.src.layers import deserialize as deserialize
from keras.src.layers import serialize as serialize
from keras.src.layers.activations.activation import Activation as Activation
from keras.src.layers.activations.elu import ELU as ELU
from keras.src.layers.activations.leaky_relu import LeakyReLU as LeakyReLU
from keras.src.layers.activations.prelu import PReLU as PReLU
from keras.src.layers.activations.relu import ReLU as ReLU
from keras.src.layers.activations.softmax import Softmax as Softmax
from keras.src.layers.attention.additive_attention import (
AdditiveAttention as AdditiveAttention,
)
from keras.src.layers.attention.attention import Attention as Attention
from keras.src.layers.attention.grouped_query_attention import (
GroupedQueryAttention as GroupQueryAttention,
)
from keras.src.layers.attention.multi_head_attention import (
MultiHeadAttention as MultiHeadAttention,
)
from keras.src.layers.convolutional.conv1d import Conv1D as Conv1D
from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D
from keras.src.layers.convolutional.conv1d_transpose import (
Conv1DTranspose as Conv1DTranspose,
)
from keras.src.layers.convolutional.conv1d_transpose import (
Conv1DTranspose as Convolution1DTranspose,
)
from keras.src.layers.convolutional.conv2d import Conv2D as Conv2D
from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D
from keras.src.layers.convolutional.conv2d_transpose import (
Conv2DTranspose as Conv2DTranspose,
)
from keras.src.layers.convolutional.conv2d_transpose import (
Conv2DTranspose as Convolution2DTranspose,
)
from keras.src.layers.convolutional.conv3d import Conv3D as Conv3D
from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D
from keras.src.layers.convolutional.conv3d_transpose import (
Conv3DTranspose as Conv3DTranspose,
)
from keras.src.layers.convolutional.conv3d_transpose import (
Conv3DTranspose as Convolution3DTranspose,
)
from keras.src.layers.convolutional.depthwise_conv1d import (
DepthwiseConv1D as DepthwiseConv1D,
)
from keras.src.layers.convolutional.depthwise_conv2d import (
DepthwiseConv2D as DepthwiseConv2D,
)
from keras.src.layers.convolutional.separable_conv1d import (
SeparableConv1D as SeparableConv1D,
)
from keras.src.layers.convolutional.separable_conv1d import (
SeparableConv1D as SeparableConvolution1D,
)
from keras.src.layers.convolutional.separable_conv2d import (
SeparableConv2D as SeparableConv2D,
)
from keras.src.layers.convolutional.separable_conv2d import (
SeparableConv2D as SeparableConvolution2D,
)
from keras.src.layers.core.dense import Dense as Dense
from keras.src.layers.core.einsum_dense import EinsumDense as EinsumDense
from keras.src.layers.core.embedding import Embedding as Embedding
from keras.src.layers.core.identity import Identity as Identity
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.core.input_layer import InputLayer as InputLayer
from keras.src.layers.core.lambda_layer import Lambda as Lambda
from keras.src.layers.core.masking import Masking as Masking
from keras.src.layers.core.reversible_embedding import (
ReversibleEmbedding as ReversibleEmbedding,
)
from keras.src.layers.core.wrapper import Wrapper as Wrapper
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.layers.merging.add import Add as Add
from keras.src.layers.merging.add import add as add
from keras.src.layers.merging.average import Average as Average
from keras.src.layers.merging.average import average as average
from keras.src.layers.merging.concatenate import Concatenate as Concatenate
from keras.src.layers.merging.concatenate import concatenate as concatenate
from keras.src.layers.merging.dot import Dot as Dot
from keras.src.layers.merging.dot import dot as dot
from keras.src.layers.merging.maximum import Maximum as Maximum
from keras.src.layers.merging.maximum import maximum as maximum
from keras.src.layers.merging.minimum import Minimum as Minimum
from keras.src.layers.merging.minimum import minimum as minimum
from keras.src.layers.merging.multiply import Multiply as Multiply
from keras.src.layers.merging.multiply import multiply as multiply
from keras.src.layers.merging.subtract import Subtract as Subtract
from keras.src.layers.merging.subtract import subtract as subtract
from keras.src.layers.normalization.batch_normalization import (
BatchNormalization as BatchNormalization,
)
from keras.src.layers.normalization.group_normalization import (
GroupNormalization as GroupNormalization,
)
from keras.src.layers.normalization.layer_normalization import (
LayerNormalization as LayerNormalization,
)
from keras.src.layers.normalization.rms_normalization import (
RMSNormalization as RMSNormalization,
)
from keras.src.layers.normalization.spectral_normalization import (
SpectralNormalization as SpectralNormalization,
)
from keras.src.layers.normalization.unit_normalization import (
UnitNormalization as UnitNormalization,
)
from keras.src.layers.pooling.adaptive_average_pooling1d import (
AdaptiveAveragePooling1D as AdaptiveAveragePooling1D,
)
from keras.src.layers.pooling.adaptive_average_pooling2d import (
AdaptiveAveragePooling2D as AdaptiveAveragePooling2D,
)
from keras.src.layers.pooling.adaptive_average_pooling3d import (
AdaptiveAveragePooling3D as AdaptiveAveragePooling3D,
)
from keras.src.layers.pooling.adaptive_max_pooling1d import (
AdaptiveMaxPooling1D as AdaptiveMaxPooling1D,
)
from keras.src.layers.pooling.adaptive_max_pooling2d import (
AdaptiveMaxPooling2D as AdaptiveMaxPooling2D,
)
from keras.src.layers.pooling.adaptive_max_pooling3d import (
AdaptiveMaxPooling3D as AdaptiveMaxPooling3D,
)
from keras.src.layers.pooling.average_pooling1d import (
AveragePooling1D as AveragePooling1D,
)
from keras.src.layers.pooling.average_pooling1d import (
AveragePooling1D as AvgPool1D,
)
from keras.src.layers.pooling.average_pooling2d import (
AveragePooling2D as AveragePooling2D,
)
from keras.src.layers.pooling.average_pooling2d import (
AveragePooling2D as AvgPool2D,
)
from keras.src.layers.pooling.average_pooling3d import (
AveragePooling3D as AveragePooling3D,
)
from keras.src.layers.pooling.average_pooling3d import (
AveragePooling3D as AvgPool3D,
)
from keras.src.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D as GlobalAveragePooling1D,
)
from keras.src.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D as GlobalAvgPool1D,
)
from keras.src.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D as GlobalAveragePooling2D,
)
from keras.src.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D as GlobalAvgPool2D,
)
from keras.src.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D as GlobalAveragePooling3D,
)
from keras.src.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D as GlobalAvgPool3D,
)
from keras.src.layers.pooling.global_max_pooling1d import (
GlobalMaxPooling1D as GlobalMaxPool1D,
)
from keras.src.layers.pooling.global_max_pooling1d import (
GlobalMaxPooling1D as GlobalMaxPooling1D,
)
from keras.src.layers.pooling.global_max_pooling2d import (
GlobalMaxPooling2D as GlobalMaxPool2D,
)
from keras.src.layers.pooling.global_max_pooling2d import (
GlobalMaxPooling2D as GlobalMaxPooling2D,
)
from keras.src.layers.pooling.global_max_pooling3d import (
GlobalMaxPooling3D as GlobalMaxPool3D,
)
from keras.src.layers.pooling.global_max_pooling3d import (
GlobalMaxPooling3D as GlobalMaxPooling3D,
)
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPooling1D
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPooling2D
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPooling3D
from keras.src.layers.preprocessing.category_encoding import (
CategoryEncoding as CategoryEncoding,
)
from keras.src.layers.preprocessing.discretization import (
Discretization as Discretization,
)
from keras.src.layers.preprocessing.hashed_crossing import (
HashedCrossing as HashedCrossing,
)
from keras.src.layers.preprocessing.hashing import Hashing as Hashing
from keras.src.layers.preprocessing.image_preprocessing.aug_mix import (
AugMix as AugMix,
)
from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import (
AutoContrast as AutoContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.center_crop import (
CenterCrop as CenterCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.cut_mix import (
CutMix as CutMix,
)
from keras.src.layers.preprocessing.image_preprocessing.equalization import (
Equalization as Equalization,
)
from keras.src.layers.preprocessing.image_preprocessing.max_num_bounding_box import (
MaxNumBoundingBoxes as MaxNumBoundingBoxes,
)
from keras.src.layers.preprocessing.image_preprocessing.mix_up import (
MixUp as MixUp,
)
from keras.src.layers.preprocessing.image_preprocessing.rand_augment import (
RandAugment as RandAugment,
)
from keras.src.layers.preprocessing.image_preprocessing.random_brightness import (
RandomBrightness as RandomBrightness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_degeneration import (
RandomColorDegeneration as RandomColorDegeneration,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_jitter import (
RandomColorJitter as RandomColorJitter,
)
from keras.src.layers.preprocessing.image_preprocessing.random_contrast import (
RandomContrast as RandomContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.random_crop import (
RandomCrop as RandomCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.random_elastic_transform import (
RandomElasticTransform as RandomElasticTransform,
)
from keras.src.layers.preprocessing.image_preprocessing.random_erasing import (
RandomErasing as RandomErasing,
)
from keras.src.layers.preprocessing.image_preprocessing.random_flip import (
RandomFlip as RandomFlip,
)
from keras.src.layers.preprocessing.image_preprocessing.random_gaussian_blur import (
RandomGaussianBlur as RandomGaussianBlur,
)
from keras.src.layers.preprocessing.image_preprocessing.random_grayscale import (
RandomGrayscale as RandomGrayscale,
)
from keras.src.layers.preprocessing.image_preprocessing.random_hue import (
RandomHue as RandomHue,
)
from keras.src.layers.preprocessing.image_preprocessing.random_invert import (
RandomInvert as RandomInvert,
)
from keras.src.layers.preprocessing.image_preprocessing.random_perspective import (
RandomPerspective as RandomPerspective,
)
from keras.src.layers.preprocessing.image_preprocessing.random_posterization import (
RandomPosterization as RandomPosterization,
)
from keras.src.layers.preprocessing.image_preprocessing.random_rotation import (
RandomRotation as RandomRotation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_saturation import (
RandomSaturation as RandomSaturation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_sharpness import (
RandomSharpness as RandomSharpness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_shear import (
RandomShear as RandomShear,
)
from keras.src.layers.preprocessing.image_preprocessing.random_translation import (
RandomTranslation as RandomTranslation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_zoom import (
RandomZoom as RandomZoom,
)
from keras.src.layers.preprocessing.image_preprocessing.resizing import (
Resizing as Resizing,
)
from keras.src.layers.preprocessing.image_preprocessing.solarization import (
Solarization as Solarization,
)
from keras.src.layers.preprocessing.integer_lookup import (
IntegerLookup as IntegerLookup,
)
from keras.src.layers.preprocessing.mel_spectrogram import (
MelSpectrogram as MelSpectrogram,
)
from keras.src.layers.preprocessing.normalization import (
Normalization as Normalization,
)
from keras.src.layers.preprocessing.pipeline import Pipeline as Pipeline
from keras.src.layers.preprocessing.rescaling import Rescaling as Rescaling
from keras.src.layers.preprocessing.stft_spectrogram import (
STFTSpectrogram as STFTSpectrogram,
)
from keras.src.layers.preprocessing.string_lookup import (
StringLookup as StringLookup,
)
from keras.src.layers.preprocessing.text_vectorization import (
TextVectorization as TextVectorization,
)
from keras.src.layers.regularization.activity_regularization import (
ActivityRegularization as ActivityRegularization,
)
from keras.src.layers.regularization.dropout import Dropout as Dropout
from keras.src.layers.regularization.gaussian_dropout import (
GaussianDropout as GaussianDropout,
)
from keras.src.layers.regularization.gaussian_noise import (
GaussianNoise as GaussianNoise,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout1D as SpatialDropout1D,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout2D as SpatialDropout2D,
)
from keras.src.layers.regularization.spatial_dropout import (
SpatialDropout3D as SpatialDropout3D,
)
from keras.src.layers.reshaping.cropping1d import Cropping1D as Cropping1D
from keras.src.layers.reshaping.cropping2d import Cropping2D as Cropping2D
from keras.src.layers.reshaping.cropping3d import Cropping3D as Cropping3D
from keras.src.layers.reshaping.flatten import Flatten as Flatten
from keras.src.layers.reshaping.permute import Permute as Permute
from keras.src.layers.reshaping.repeat_vector import (
RepeatVector as RepeatVector,
)
from keras.src.layers.reshaping.reshape import Reshape as Reshape
from keras.src.layers.reshaping.up_sampling1d import (
UpSampling1D as UpSampling1D,
)
from keras.src.layers.reshaping.up_sampling2d import (
UpSampling2D as UpSampling2D,
)
from keras.src.layers.reshaping.up_sampling3d import (
UpSampling3D as UpSampling3D,
)
from keras.src.layers.reshaping.zero_padding1d import (
ZeroPadding1D as ZeroPadding1D,
)
from keras.src.layers.reshaping.zero_padding2d import (
ZeroPadding2D as ZeroPadding2D,
)
from keras.src.layers.reshaping.zero_padding3d import (
ZeroPadding3D as ZeroPadding3D,
)
from keras.src.layers.rnn.bidirectional import Bidirectional as Bidirectional
from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D as ConvLSTM1D
from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D as ConvLSTM2D
from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D as ConvLSTM3D
from keras.src.layers.rnn.gru import GRU as GRU
from keras.src.layers.rnn.gru import GRUCell as GRUCell
from keras.src.layers.rnn.lstm import LSTM as LSTM
from keras.src.layers.rnn.lstm import LSTMCell as LSTMCell
from keras.src.layers.rnn.rnn import RNN as RNN
from keras.src.layers.rnn.simple_rnn import SimpleRNN as SimpleRNN
from keras.src.layers.rnn.simple_rnn import SimpleRNNCell as SimpleRNNCell
from keras.src.layers.rnn.stacked_rnn_cells import (
StackedRNNCells as StackedRNNCells,
)
from keras.src.layers.rnn.time_distributed import (
TimeDistributed as TimeDistributed,
)
from keras.src.legacy.layers import AlphaDropout as AlphaDropout
from keras.src.legacy.layers import RandomHeight as RandomHeight
from keras.src.legacy.layers import RandomWidth as RandomWidth
from keras.src.legacy.layers import ThresholdedReLU as ThresholdedReLU
from keras.src.utils.jax_layer import FlaxLayer as FlaxLayer
from keras.src.utils.jax_layer import JaxLayer as JaxLayer
from keras.src.utils.torch_utils import TorchModuleWrapper as TorchModuleWrapper
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/legacy/__init__.py | keras/api/_tf_keras/keras/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.legacy import saving as saving
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/legacy/saving/__init__.py | keras/api/_tf_keras/keras/legacy/saving/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/config/__init__.py | keras/api/_tf_keras/keras/config/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
enable_flash_attention as enable_flash_attention,
)
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import (
is_flash_attention_enabled as is_flash_attention_enabled,
)
from keras.src.backend.config import is_nnx_enabled as is_nnx_enabled
from keras.src.backend.config import max_epochs as max_epochs
from keras.src.backend.config import max_steps_per_epoch as max_steps_per_epoch
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.backend.config import set_max_epochs as set_max_epochs
from keras.src.backend.config import (
set_max_steps_per_epoch as set_max_steps_per_epoch,
)
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.saving.serialization_lib import (
enable_unsafe_deserialization as enable_unsafe_deserialization,
)
from keras.src.utils.backend_utils import set_backend as set_backend
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.traceback_utils import (
disable_traceback_filtering as disable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
enable_traceback_filtering as enable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
is_traceback_filtering_enabled as is_traceback_filtering_enabled,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/export/__init__.py | keras/api/_tf_keras/keras/export/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive as ExportArchive
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/random/__init__.py | keras/api/_tf_keras/keras/random/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta as beta
from keras.src.random.random import binomial as binomial
from keras.src.random.random import categorical as categorical
from keras.src.random.random import dropout as dropout
from keras.src.random.random import gamma as gamma
from keras.src.random.random import normal as normal
from keras.src.random.random import randint as randint
from keras.src.random.random import shuffle as shuffle
from keras.src.random.random import truncated_normal as truncated_normal
from keras.src.random.random import uniform as uniform
from keras.src.random.seed_generator import SeedGenerator as SeedGenerator
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/tree/__init__.py | keras/api/_tf_keras/keras/tree/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths as assert_same_paths
from keras.src.tree.tree_api import (
assert_same_structure as assert_same_structure,
)
from keras.src.tree.tree_api import flatten as flatten
from keras.src.tree.tree_api import flatten_with_path as flatten_with_path
from keras.src.tree.tree_api import is_nested as is_nested
from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure as map_shape_structure
from keras.src.tree.tree_api import map_structure as map_structure
from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as
from keras.src.tree.tree_api import traverse as traverse
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/ops/__init__.py | keras/api/_tf_keras/keras/ops/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.ops import image as image
from keras.ops import linalg as linalg
from keras.ops import nn as nn
from keras.ops import numpy as numpy
from keras.src.ops.core import associative_scan as associative_scan
from keras.src.ops.core import cast as cast
from keras.src.ops.core import cond as cond
from keras.src.ops.core import convert_to_numpy as convert_to_numpy
from keras.src.ops.core import convert_to_tensor as convert_to_tensor
from keras.src.ops.core import custom_gradient as custom_gradient
from keras.src.ops.core import dtype as dtype
from keras.src.ops.core import fori_loop as fori_loop
from keras.src.ops.core import is_tensor as is_tensor
from keras.src.ops.core import map as map
from keras.src.ops.core import saturate_cast as saturate_cast
from keras.src.ops.core import scan as scan
from keras.src.ops.core import scatter as scatter
from keras.src.ops.core import scatter_update as scatter_update
from keras.src.ops.core import shape as shape
from keras.src.ops.core import slice as slice
from keras.src.ops.core import slice_update as slice_update
from keras.src.ops.core import stop_gradient as stop_gradient
from keras.src.ops.core import switch as switch
from keras.src.ops.core import unstack as unstack
from keras.src.ops.core import vectorized_map as vectorized_map
from keras.src.ops.core import while_loop as while_loop
from keras.src.ops.einops import rearrange as rearrange
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import cholesky_inverse as cholesky_inverse
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import jvp as jvp
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
from keras.src.ops.math import erf as erf
from keras.src.ops.math import erfinv as erfinv
from keras.src.ops.math import extract_sequences as extract_sequences
from keras.src.ops.math import fft as fft
from keras.src.ops.math import fft2 as fft2
from keras.src.ops.math import ifft2 as ifft2
from keras.src.ops.math import in_top_k as in_top_k
from keras.src.ops.math import irfft as irfft
from keras.src.ops.math import istft as istft
from keras.src.ops.math import logdet as logdet
from keras.src.ops.math import logsumexp as logsumexp
from keras.src.ops.math import rfft as rfft
from keras.src.ops.math import rsqrt as rsqrt
from keras.src.ops.math import segment_max as segment_max
from keras.src.ops.math import segment_sum as segment_sum
from keras.src.ops.math import stft as stft
from keras.src.ops.math import top_k as top_k
from keras.src.ops.math import view_as_complex as view_as_complex
from keras.src.ops.math import view_as_real as view_as_real
from keras.src.ops.nn import adaptive_average_pool as adaptive_average_pool
from keras.src.ops.nn import adaptive_max_pool as adaptive_max_pool
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
from keras.src.ops.nn import unfold as unfold
from keras.src.ops.numpy import abs as abs
from keras.src.ops.numpy import absolute as absolute
from keras.src.ops.numpy import add as add
from keras.src.ops.numpy import all as all
from keras.src.ops.numpy import amax as amax
from keras.src.ops.numpy import amin as amin
from keras.src.ops.numpy import angle as angle
from keras.src.ops.numpy import any as any
from keras.src.ops.numpy import append as append
from keras.src.ops.numpy import arange as arange
from keras.src.ops.numpy import arccos as arccos
from keras.src.ops.numpy import arccosh as arccosh
from keras.src.ops.numpy import arcsin as arcsin
from keras.src.ops.numpy import arcsinh as arcsinh
from keras.src.ops.numpy import arctan as arctan
from keras.src.ops.numpy import arctan2 as arctan2
from keras.src.ops.numpy import arctanh as arctanh
from keras.src.ops.numpy import argmax as argmax
from keras.src.ops.numpy import argmin as argmin
from keras.src.ops.numpy import argpartition as argpartition
from keras.src.ops.numpy import argsort as argsort
from keras.src.ops.numpy import array as array
from keras.src.ops.numpy import array_split as array_split
from keras.src.ops.numpy import average as average
from keras.src.ops.numpy import bartlett as bartlett
from keras.src.ops.numpy import bincount as bincount
from keras.src.ops.numpy import bitwise_and as bitwise_and
from keras.src.ops.numpy import bitwise_invert as bitwise_invert
from keras.src.ops.numpy import bitwise_left_shift as bitwise_left_shift
from keras.src.ops.numpy import bitwise_not as bitwise_not
from keras.src.ops.numpy import bitwise_or as bitwise_or
from keras.src.ops.numpy import bitwise_right_shift as bitwise_right_shift
from keras.src.ops.numpy import bitwise_xor as bitwise_xor
from keras.src.ops.numpy import blackman as blackman
from keras.src.ops.numpy import broadcast_to as broadcast_to
from keras.src.ops.numpy import cbrt as cbrt
from keras.src.ops.numpy import ceil as ceil
from keras.src.ops.numpy import clip as clip
from keras.src.ops.numpy import concatenate as concatenate
from keras.src.ops.numpy import conj as conj
from keras.src.ops.numpy import conjugate as conjugate
from keras.src.ops.numpy import copy as copy
from keras.src.ops.numpy import corrcoef as corrcoef
from keras.src.ops.numpy import correlate as correlate
from keras.src.ops.numpy import cos as cos
from keras.src.ops.numpy import cosh as cosh
from keras.src.ops.numpy import count_nonzero as count_nonzero
from keras.src.ops.numpy import cross as cross
from keras.src.ops.numpy import cumprod as cumprod
from keras.src.ops.numpy import cumsum as cumsum
from keras.src.ops.numpy import deg2rad as deg2rad
from keras.src.ops.numpy import diag as diag
from keras.src.ops.numpy import diagflat as diagflat
from keras.src.ops.numpy import diagonal as diagonal
from keras.src.ops.numpy import diff as diff
from keras.src.ops.numpy import digitize as digitize
from keras.src.ops.numpy import divide as divide
from keras.src.ops.numpy import divide_no_nan as divide_no_nan
from keras.src.ops.numpy import dot as dot
from keras.src.ops.numpy import einsum as einsum
from keras.src.ops.numpy import empty as empty
from keras.src.ops.numpy import empty_like as empty_like
from keras.src.ops.numpy import equal as equal
from keras.src.ops.numpy import exp as exp
from keras.src.ops.numpy import exp2 as exp2
from keras.src.ops.numpy import expand_dims as expand_dims
from keras.src.ops.numpy import expm1 as expm1
from keras.src.ops.numpy import eye as eye
from keras.src.ops.numpy import flip as flip
from keras.src.ops.numpy import floor as floor
from keras.src.ops.numpy import floor_divide as floor_divide
from keras.src.ops.numpy import full as full
from keras.src.ops.numpy import full_like as full_like
from keras.src.ops.numpy import gcd as gcd
from keras.src.ops.numpy import get_item as get_item
from keras.src.ops.numpy import greater as greater
from keras.src.ops.numpy import greater_equal as greater_equal
from keras.src.ops.numpy import hamming as hamming
from keras.src.ops.numpy import hanning as hanning
from keras.src.ops.numpy import heaviside as heaviside
from keras.src.ops.numpy import histogram as histogram
from keras.src.ops.numpy import hstack as hstack
from keras.src.ops.numpy import hypot as hypot
from keras.src.ops.numpy import identity as identity
from keras.src.ops.numpy import imag as imag
from keras.src.ops.numpy import inner as inner
from keras.src.ops.numpy import isclose as isclose
from keras.src.ops.numpy import isfinite as isfinite
from keras.src.ops.numpy import isin as isin
from keras.src.ops.numpy import isinf as isinf
from keras.src.ops.numpy import isnan as isnan
from keras.src.ops.numpy import isneginf as isneginf
from keras.src.ops.numpy import isposinf as isposinf
from keras.src.ops.numpy import isreal as isreal
from keras.src.ops.numpy import kaiser as kaiser
from keras.src.ops.numpy import kron as kron
from keras.src.ops.numpy import lcm as lcm
from keras.src.ops.numpy import ldexp as ldexp
from keras.src.ops.numpy import left_shift as left_shift
from keras.src.ops.numpy import less as less
from keras.src.ops.numpy import less_equal as less_equal
from keras.src.ops.numpy import linspace as linspace
from keras.src.ops.numpy import log as log
from keras.src.ops.numpy import log1p as log1p
from keras.src.ops.numpy import log2 as log2
from keras.src.ops.numpy import log10 as log10
from keras.src.ops.numpy import logaddexp as logaddexp
from keras.src.ops.numpy import logaddexp2 as logaddexp2
from keras.src.ops.numpy import logical_and as logical_and
from keras.src.ops.numpy import logical_not as logical_not
from keras.src.ops.numpy import logical_or as logical_or
from keras.src.ops.numpy import logical_xor as logical_xor
from keras.src.ops.numpy import logspace as logspace
from keras.src.ops.numpy import matmul as matmul
from keras.src.ops.numpy import max as max
from keras.src.ops.numpy import maximum as maximum
from keras.src.ops.numpy import mean as mean
from keras.src.ops.numpy import median as median
from keras.src.ops.numpy import meshgrid as meshgrid
from keras.src.ops.numpy import min as min
from keras.src.ops.numpy import minimum as minimum
from keras.src.ops.numpy import mod as mod
from keras.src.ops.numpy import moveaxis as moveaxis
from keras.src.ops.numpy import multiply as multiply
from keras.src.ops.numpy import nan_to_num as nan_to_num
from keras.src.ops.numpy import ndim as ndim
from keras.src.ops.numpy import negative as negative
from keras.src.ops.numpy import nextafter as nextafter
from keras.src.ops.numpy import nonzero as nonzero
from keras.src.ops.numpy import not_equal as not_equal
from keras.src.ops.numpy import ones as ones
from keras.src.ops.numpy import ones_like as ones_like
from keras.src.ops.numpy import outer as outer
from keras.src.ops.numpy import pad as pad
from keras.src.ops.numpy import power as power
from keras.src.ops.numpy import prod as prod
from keras.src.ops.numpy import quantile as quantile
from keras.src.ops.numpy import ravel as ravel
from keras.src.ops.numpy import real as real
from keras.src.ops.numpy import reciprocal as reciprocal
from keras.src.ops.numpy import repeat as repeat
from keras.src.ops.numpy import reshape as reshape
from keras.src.ops.numpy import right_shift as right_shift
from keras.src.ops.numpy import roll as roll
from keras.src.ops.numpy import rot90 as rot90
from keras.src.ops.numpy import round as round
from keras.src.ops.numpy import searchsorted as searchsorted
from keras.src.ops.numpy import select as select
from keras.src.ops.numpy import sign as sign
from keras.src.ops.numpy import signbit as signbit
from keras.src.ops.numpy import sin as sin
from keras.src.ops.numpy import sinh as sinh
from keras.src.ops.numpy import size as size
from keras.src.ops.numpy import slogdet as slogdet
from keras.src.ops.numpy import sort as sort
from keras.src.ops.numpy import split as split
from keras.src.ops.numpy import sqrt as sqrt
from keras.src.ops.numpy import square as square
from keras.src.ops.numpy import squeeze as squeeze
from keras.src.ops.numpy import stack as stack
from keras.src.ops.numpy import std as std
from keras.src.ops.numpy import subtract as subtract
from keras.src.ops.numpy import sum as sum
from keras.src.ops.numpy import swapaxes as swapaxes
from keras.src.ops.numpy import take as take
from keras.src.ops.numpy import take_along_axis as take_along_axis
from keras.src.ops.numpy import tan as tan
from keras.src.ops.numpy import tanh as tanh
from keras.src.ops.numpy import tensordot as tensordot
from keras.src.ops.numpy import tile as tile
from keras.src.ops.numpy import trace as trace
from keras.src.ops.numpy import transpose as transpose
from keras.src.ops.numpy import trapezoid as trapezoid
from keras.src.ops.numpy import tri as tri
from keras.src.ops.numpy import tril as tril
from keras.src.ops.numpy import triu as triu
from keras.src.ops.numpy import true_divide as true_divide
from keras.src.ops.numpy import trunc as trunc
from keras.src.ops.numpy import unravel_index as unravel_index
from keras.src.ops.numpy import vander as vander
from keras.src.ops.numpy import var as var
from keras.src.ops.numpy import vdot as vdot
from keras.src.ops.numpy import vectorize as vectorize
from keras.src.ops.numpy import view as view
from keras.src.ops.numpy import vstack as vstack
from keras.src.ops.numpy import where as where
from keras.src.ops.numpy import zeros as zeros
from keras.src.ops.numpy import zeros_like as zeros_like
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/ops/image/__init__.py | keras/api/_tf_keras/keras/ops/image/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform as affine_transform
from keras.src.ops.image import crop_images as crop_images
from keras.src.ops.image import elastic_transform as elastic_transform
from keras.src.ops.image import extract_patches as extract_patches
from keras.src.ops.image import extract_patches_3d as extract_patches_3d
from keras.src.ops.image import gaussian_blur as gaussian_blur
from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb
from keras.src.ops.image import map_coordinates as map_coordinates
from keras.src.ops.image import pad_images as pad_images
from keras.src.ops.image import perspective_transform as perspective_transform
from keras.src.ops.image import resize as resize
from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv
from keras.src.ops.image import scale_and_translate as scale_and_translate
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/ops/linalg/__init__.py | keras/api/_tf_keras/keras/ops/linalg/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import cholesky_inverse as cholesky_inverse
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import jvp as jvp
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/ops/numpy/__init__.py | keras/api/_tf_keras/keras/ops/numpy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.numpy import abs as abs
from keras.src.ops.numpy import absolute as absolute
from keras.src.ops.numpy import add as add
from keras.src.ops.numpy import all as all
from keras.src.ops.numpy import amax as amax
from keras.src.ops.numpy import amin as amin
from keras.src.ops.numpy import angle as angle
from keras.src.ops.numpy import any as any
from keras.src.ops.numpy import append as append
from keras.src.ops.numpy import arange as arange
from keras.src.ops.numpy import arccos as arccos
from keras.src.ops.numpy import arccosh as arccosh
from keras.src.ops.numpy import arcsin as arcsin
from keras.src.ops.numpy import arcsinh as arcsinh
from keras.src.ops.numpy import arctan as arctan
from keras.src.ops.numpy import arctan2 as arctan2
from keras.src.ops.numpy import arctanh as arctanh
from keras.src.ops.numpy import argmax as argmax
from keras.src.ops.numpy import argmin as argmin
from keras.src.ops.numpy import argpartition as argpartition
from keras.src.ops.numpy import argsort as argsort
from keras.src.ops.numpy import array as array
from keras.src.ops.numpy import array_split as array_split
from keras.src.ops.numpy import average as average
from keras.src.ops.numpy import bartlett as bartlett
from keras.src.ops.numpy import bincount as bincount
from keras.src.ops.numpy import bitwise_and as bitwise_and
from keras.src.ops.numpy import bitwise_invert as bitwise_invert
from keras.src.ops.numpy import bitwise_left_shift as bitwise_left_shift
from keras.src.ops.numpy import bitwise_not as bitwise_not
from keras.src.ops.numpy import bitwise_or as bitwise_or
from keras.src.ops.numpy import bitwise_right_shift as bitwise_right_shift
from keras.src.ops.numpy import bitwise_xor as bitwise_xor
from keras.src.ops.numpy import blackman as blackman
from keras.src.ops.numpy import broadcast_to as broadcast_to
from keras.src.ops.numpy import cbrt as cbrt
from keras.src.ops.numpy import ceil as ceil
from keras.src.ops.numpy import clip as clip
from keras.src.ops.numpy import concatenate as concatenate
from keras.src.ops.numpy import conj as conj
from keras.src.ops.numpy import conjugate as conjugate
from keras.src.ops.numpy import copy as copy
from keras.src.ops.numpy import corrcoef as corrcoef
from keras.src.ops.numpy import correlate as correlate
from keras.src.ops.numpy import cos as cos
from keras.src.ops.numpy import cosh as cosh
from keras.src.ops.numpy import count_nonzero as count_nonzero
from keras.src.ops.numpy import cross as cross
from keras.src.ops.numpy import cumprod as cumprod
from keras.src.ops.numpy import cumsum as cumsum
from keras.src.ops.numpy import deg2rad as deg2rad
from keras.src.ops.numpy import diag as diag
from keras.src.ops.numpy import diagflat as diagflat
from keras.src.ops.numpy import diagonal as diagonal
from keras.src.ops.numpy import diff as diff
from keras.src.ops.numpy import digitize as digitize
from keras.src.ops.numpy import divide as divide
from keras.src.ops.numpy import divide_no_nan as divide_no_nan
from keras.src.ops.numpy import dot as dot
from keras.src.ops.numpy import einsum as einsum
from keras.src.ops.numpy import empty as empty
from keras.src.ops.numpy import empty_like as empty_like
from keras.src.ops.numpy import equal as equal
from keras.src.ops.numpy import exp as exp
from keras.src.ops.numpy import exp2 as exp2
from keras.src.ops.numpy import expand_dims as expand_dims
from keras.src.ops.numpy import expm1 as expm1
from keras.src.ops.numpy import eye as eye
from keras.src.ops.numpy import flip as flip
from keras.src.ops.numpy import floor as floor
from keras.src.ops.numpy import floor_divide as floor_divide
from keras.src.ops.numpy import full as full
from keras.src.ops.numpy import full_like as full_like
from keras.src.ops.numpy import gcd as gcd
from keras.src.ops.numpy import get_item as get_item
from keras.src.ops.numpy import greater as greater
from keras.src.ops.numpy import greater_equal as greater_equal
from keras.src.ops.numpy import hamming as hamming
from keras.src.ops.numpy import hanning as hanning
from keras.src.ops.numpy import heaviside as heaviside
from keras.src.ops.numpy import histogram as histogram
from keras.src.ops.numpy import hstack as hstack
from keras.src.ops.numpy import hypot as hypot
from keras.src.ops.numpy import identity as identity
from keras.src.ops.numpy import imag as imag
from keras.src.ops.numpy import inner as inner
from keras.src.ops.numpy import isclose as isclose
from keras.src.ops.numpy import isfinite as isfinite
from keras.src.ops.numpy import isin as isin
from keras.src.ops.numpy import isinf as isinf
from keras.src.ops.numpy import isnan as isnan
from keras.src.ops.numpy import isneginf as isneginf
from keras.src.ops.numpy import isposinf as isposinf
from keras.src.ops.numpy import isreal as isreal
from keras.src.ops.numpy import kaiser as kaiser
from keras.src.ops.numpy import kron as kron
from keras.src.ops.numpy import lcm as lcm
from keras.src.ops.numpy import ldexp as ldexp
from keras.src.ops.numpy import left_shift as left_shift
from keras.src.ops.numpy import less as less
from keras.src.ops.numpy import less_equal as less_equal
from keras.src.ops.numpy import linspace as linspace
from keras.src.ops.numpy import log as log
from keras.src.ops.numpy import log1p as log1p
from keras.src.ops.numpy import log2 as log2
from keras.src.ops.numpy import log10 as log10
from keras.src.ops.numpy import logaddexp as logaddexp
from keras.src.ops.numpy import logaddexp2 as logaddexp2
from keras.src.ops.numpy import logical_and as logical_and
from keras.src.ops.numpy import logical_not as logical_not
from keras.src.ops.numpy import logical_or as logical_or
from keras.src.ops.numpy import logical_xor as logical_xor
from keras.src.ops.numpy import logspace as logspace
from keras.src.ops.numpy import matmul as matmul
from keras.src.ops.numpy import max as max
from keras.src.ops.numpy import maximum as maximum
from keras.src.ops.numpy import mean as mean
from keras.src.ops.numpy import median as median
from keras.src.ops.numpy import meshgrid as meshgrid
from keras.src.ops.numpy import min as min
from keras.src.ops.numpy import minimum as minimum
from keras.src.ops.numpy import mod as mod
from keras.src.ops.numpy import moveaxis as moveaxis
from keras.src.ops.numpy import multiply as multiply
from keras.src.ops.numpy import nan_to_num as nan_to_num
from keras.src.ops.numpy import ndim as ndim
from keras.src.ops.numpy import negative as negative
from keras.src.ops.numpy import nextafter as nextafter
from keras.src.ops.numpy import nonzero as nonzero
from keras.src.ops.numpy import not_equal as not_equal
from keras.src.ops.numpy import ones as ones
from keras.src.ops.numpy import ones_like as ones_like
from keras.src.ops.numpy import outer as outer
from keras.src.ops.numpy import pad as pad
from keras.src.ops.numpy import power as power
from keras.src.ops.numpy import prod as prod
from keras.src.ops.numpy import quantile as quantile
from keras.src.ops.numpy import ravel as ravel
from keras.src.ops.numpy import real as real
from keras.src.ops.numpy import reciprocal as reciprocal
from keras.src.ops.numpy import repeat as repeat
from keras.src.ops.numpy import reshape as reshape
from keras.src.ops.numpy import right_shift as right_shift
from keras.src.ops.numpy import roll as roll
from keras.src.ops.numpy import rot90 as rot90
from keras.src.ops.numpy import round as round
from keras.src.ops.numpy import searchsorted as searchsorted
from keras.src.ops.numpy import select as select
from keras.src.ops.numpy import sign as sign
from keras.src.ops.numpy import signbit as signbit
from keras.src.ops.numpy import sin as sin
from keras.src.ops.numpy import sinh as sinh
from keras.src.ops.numpy import size as size
from keras.src.ops.numpy import slogdet as slogdet
from keras.src.ops.numpy import sort as sort
from keras.src.ops.numpy import split as split
from keras.src.ops.numpy import sqrt as sqrt
from keras.src.ops.numpy import square as square
from keras.src.ops.numpy import squeeze as squeeze
from keras.src.ops.numpy import stack as stack
from keras.src.ops.numpy import std as std
from keras.src.ops.numpy import subtract as subtract
from keras.src.ops.numpy import sum as sum
from keras.src.ops.numpy import swapaxes as swapaxes
from keras.src.ops.numpy import take as take
from keras.src.ops.numpy import take_along_axis as take_along_axis
from keras.src.ops.numpy import tan as tan
from keras.src.ops.numpy import tanh as tanh
from keras.src.ops.numpy import tensordot as tensordot
from keras.src.ops.numpy import tile as tile
from keras.src.ops.numpy import trace as trace
from keras.src.ops.numpy import transpose as transpose
from keras.src.ops.numpy import trapezoid as trapezoid
from keras.src.ops.numpy import tri as tri
from keras.src.ops.numpy import tril as tril
from keras.src.ops.numpy import triu as triu
from keras.src.ops.numpy import true_divide as true_divide
from keras.src.ops.numpy import trunc as trunc
from keras.src.ops.numpy import unravel_index as unravel_index
from keras.src.ops.numpy import vander as vander
from keras.src.ops.numpy import var as var
from keras.src.ops.numpy import vdot as vdot
from keras.src.ops.numpy import vectorize as vectorize
from keras.src.ops.numpy import view as view
from keras.src.ops.numpy import vstack as vstack
from keras.src.ops.numpy import where as where
from keras.src.ops.numpy import zeros as zeros
from keras.src.ops.numpy import zeros_like as zeros_like
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/ops/nn/__init__.py | keras/api/_tf_keras/keras/ops/nn/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import adaptive_average_pool as adaptive_average_pool
from keras.src.ops.nn import adaptive_max_pool as adaptive_max_pool
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
from keras.src.ops.nn import unfold as unfold
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/_tf_keras/keras/quantizers/__init__.py | keras/api/_tf_keras/keras/quantizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.gptq_config import GPTQConfig as GPTQConfig
from keras.src.quantizers.quantization_config import (
Float8QuantizationConfig as Float8QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
Int4QuantizationConfig as Int4QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
Int8QuantizationConfig as Int8QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
QuantizationConfig as QuantizationConfig,
)
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import pack_int4 as pack_int4
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
from keras.src.quantizers.quantizers import unpack_int4 as unpack_int4
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/config/__init__.py | keras/api/config/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
enable_flash_attention as enable_flash_attention,
)
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import (
is_flash_attention_enabled as is_flash_attention_enabled,
)
from keras.src.backend.config import is_nnx_enabled as is_nnx_enabled
from keras.src.backend.config import max_epochs as max_epochs
from keras.src.backend.config import max_steps_per_epoch as max_steps_per_epoch
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.backend.config import set_max_epochs as set_max_epochs
from keras.src.backend.config import (
set_max_steps_per_epoch as set_max_steps_per_epoch,
)
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.saving.serialization_lib import (
enable_unsafe_deserialization as enable_unsafe_deserialization,
)
from keras.src.utils.backend_utils import set_backend as set_backend
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.traceback_utils import (
disable_traceback_filtering as disable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
enable_traceback_filtering as enable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
is_traceback_filtering_enabled as is_traceback_filtering_enabled,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/export/__init__.py | keras/api/export/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive as ExportArchive
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/random/__init__.py | keras/api/random/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta as beta
from keras.src.random.random import binomial as binomial
from keras.src.random.random import categorical as categorical
from keras.src.random.random import dropout as dropout
from keras.src.random.random import gamma as gamma
from keras.src.random.random import normal as normal
from keras.src.random.random import randint as randint
from keras.src.random.random import shuffle as shuffle
from keras.src.random.random import truncated_normal as truncated_normal
from keras.src.random.random import uniform as uniform
from keras.src.random.seed_generator import SeedGenerator as SeedGenerator
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/tree/__init__.py | keras/api/tree/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths as assert_same_paths
from keras.src.tree.tree_api import (
assert_same_structure as assert_same_structure,
)
from keras.src.tree.tree_api import flatten as flatten
from keras.src.tree.tree_api import flatten_with_path as flatten_with_path
from keras.src.tree.tree_api import is_nested as is_nested
from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure as map_shape_structure
from keras.src.tree.tree_api import map_structure as map_structure
from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as
from keras.src.tree.tree_api import traverse as traverse
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/ops/__init__.py | keras/api/ops/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.ops import image as image
from keras.ops import linalg as linalg
from keras.ops import nn as nn
from keras.ops import numpy as numpy
from keras.src.ops.core import associative_scan as associative_scan
from keras.src.ops.core import cast as cast
from keras.src.ops.core import cond as cond
from keras.src.ops.core import convert_to_numpy as convert_to_numpy
from keras.src.ops.core import convert_to_tensor as convert_to_tensor
from keras.src.ops.core import custom_gradient as custom_gradient
from keras.src.ops.core import dtype as dtype
from keras.src.ops.core import fori_loop as fori_loop
from keras.src.ops.core import is_tensor as is_tensor
from keras.src.ops.core import map as map
from keras.src.ops.core import saturate_cast as saturate_cast
from keras.src.ops.core import scan as scan
from keras.src.ops.core import scatter as scatter
from keras.src.ops.core import scatter_update as scatter_update
from keras.src.ops.core import shape as shape
from keras.src.ops.core import slice as slice
from keras.src.ops.core import slice_update as slice_update
from keras.src.ops.core import stop_gradient as stop_gradient
from keras.src.ops.core import switch as switch
from keras.src.ops.core import unstack as unstack
from keras.src.ops.core import vectorized_map as vectorized_map
from keras.src.ops.core import while_loop as while_loop
from keras.src.ops.einops import rearrange as rearrange
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import cholesky_inverse as cholesky_inverse
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import jvp as jvp
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
from keras.src.ops.math import erf as erf
from keras.src.ops.math import erfinv as erfinv
from keras.src.ops.math import extract_sequences as extract_sequences
from keras.src.ops.math import fft as fft
from keras.src.ops.math import fft2 as fft2
from keras.src.ops.math import ifft2 as ifft2
from keras.src.ops.math import in_top_k as in_top_k
from keras.src.ops.math import irfft as irfft
from keras.src.ops.math import istft as istft
from keras.src.ops.math import logdet as logdet
from keras.src.ops.math import logsumexp as logsumexp
from keras.src.ops.math import rfft as rfft
from keras.src.ops.math import rsqrt as rsqrt
from keras.src.ops.math import segment_max as segment_max
from keras.src.ops.math import segment_sum as segment_sum
from keras.src.ops.math import stft as stft
from keras.src.ops.math import top_k as top_k
from keras.src.ops.math import view_as_complex as view_as_complex
from keras.src.ops.math import view_as_real as view_as_real
from keras.src.ops.nn import adaptive_average_pool as adaptive_average_pool
from keras.src.ops.nn import adaptive_max_pool as adaptive_max_pool
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
from keras.src.ops.nn import unfold as unfold
from keras.src.ops.numpy import abs as abs
from keras.src.ops.numpy import absolute as absolute
from keras.src.ops.numpy import add as add
from keras.src.ops.numpy import all as all
from keras.src.ops.numpy import amax as amax
from keras.src.ops.numpy import amin as amin
from keras.src.ops.numpy import angle as angle
from keras.src.ops.numpy import any as any
from keras.src.ops.numpy import append as append
from keras.src.ops.numpy import arange as arange
from keras.src.ops.numpy import arccos as arccos
from keras.src.ops.numpy import arccosh as arccosh
from keras.src.ops.numpy import arcsin as arcsin
from keras.src.ops.numpy import arcsinh as arcsinh
from keras.src.ops.numpy import arctan as arctan
from keras.src.ops.numpy import arctan2 as arctan2
from keras.src.ops.numpy import arctanh as arctanh
from keras.src.ops.numpy import argmax as argmax
from keras.src.ops.numpy import argmin as argmin
from keras.src.ops.numpy import argpartition as argpartition
from keras.src.ops.numpy import argsort as argsort
from keras.src.ops.numpy import array as array
from keras.src.ops.numpy import array_split as array_split
from keras.src.ops.numpy import average as average
from keras.src.ops.numpy import bartlett as bartlett
from keras.src.ops.numpy import bincount as bincount
from keras.src.ops.numpy import bitwise_and as bitwise_and
from keras.src.ops.numpy import bitwise_invert as bitwise_invert
from keras.src.ops.numpy import bitwise_left_shift as bitwise_left_shift
from keras.src.ops.numpy import bitwise_not as bitwise_not
from keras.src.ops.numpy import bitwise_or as bitwise_or
from keras.src.ops.numpy import bitwise_right_shift as bitwise_right_shift
from keras.src.ops.numpy import bitwise_xor as bitwise_xor
from keras.src.ops.numpy import blackman as blackman
from keras.src.ops.numpy import broadcast_to as broadcast_to
from keras.src.ops.numpy import cbrt as cbrt
from keras.src.ops.numpy import ceil as ceil
from keras.src.ops.numpy import clip as clip
from keras.src.ops.numpy import concatenate as concatenate
from keras.src.ops.numpy import conj as conj
from keras.src.ops.numpy import conjugate as conjugate
from keras.src.ops.numpy import copy as copy
from keras.src.ops.numpy import corrcoef as corrcoef
from keras.src.ops.numpy import correlate as correlate
from keras.src.ops.numpy import cos as cos
from keras.src.ops.numpy import cosh as cosh
from keras.src.ops.numpy import count_nonzero as count_nonzero
from keras.src.ops.numpy import cross as cross
from keras.src.ops.numpy import cumprod as cumprod
from keras.src.ops.numpy import cumsum as cumsum
from keras.src.ops.numpy import deg2rad as deg2rad
from keras.src.ops.numpy import diag as diag
from keras.src.ops.numpy import diagflat as diagflat
from keras.src.ops.numpy import diagonal as diagonal
from keras.src.ops.numpy import diff as diff
from keras.src.ops.numpy import digitize as digitize
from keras.src.ops.numpy import divide as divide
from keras.src.ops.numpy import divide_no_nan as divide_no_nan
from keras.src.ops.numpy import dot as dot
from keras.src.ops.numpy import einsum as einsum
from keras.src.ops.numpy import empty as empty
from keras.src.ops.numpy import empty_like as empty_like
from keras.src.ops.numpy import equal as equal
from keras.src.ops.numpy import exp as exp
from keras.src.ops.numpy import exp2 as exp2
from keras.src.ops.numpy import expand_dims as expand_dims
from keras.src.ops.numpy import expm1 as expm1
from keras.src.ops.numpy import eye as eye
from keras.src.ops.numpy import flip as flip
from keras.src.ops.numpy import floor as floor
from keras.src.ops.numpy import floor_divide as floor_divide
from keras.src.ops.numpy import full as full
from keras.src.ops.numpy import full_like as full_like
from keras.src.ops.numpy import gcd as gcd
from keras.src.ops.numpy import get_item as get_item
from keras.src.ops.numpy import greater as greater
from keras.src.ops.numpy import greater_equal as greater_equal
from keras.src.ops.numpy import hamming as hamming
from keras.src.ops.numpy import hanning as hanning
from keras.src.ops.numpy import heaviside as heaviside
from keras.src.ops.numpy import histogram as histogram
from keras.src.ops.numpy import hstack as hstack
from keras.src.ops.numpy import hypot as hypot
from keras.src.ops.numpy import identity as identity
from keras.src.ops.numpy import imag as imag
from keras.src.ops.numpy import inner as inner
from keras.src.ops.numpy import isclose as isclose
from keras.src.ops.numpy import isfinite as isfinite
from keras.src.ops.numpy import isin as isin
from keras.src.ops.numpy import isinf as isinf
from keras.src.ops.numpy import isnan as isnan
from keras.src.ops.numpy import isneginf as isneginf
from keras.src.ops.numpy import isposinf as isposinf
from keras.src.ops.numpy import isreal as isreal
from keras.src.ops.numpy import kaiser as kaiser
from keras.src.ops.numpy import kron as kron
from keras.src.ops.numpy import lcm as lcm
from keras.src.ops.numpy import ldexp as ldexp
from keras.src.ops.numpy import left_shift as left_shift
from keras.src.ops.numpy import less as less
from keras.src.ops.numpy import less_equal as less_equal
from keras.src.ops.numpy import linspace as linspace
from keras.src.ops.numpy import log as log
from keras.src.ops.numpy import log1p as log1p
from keras.src.ops.numpy import log2 as log2
from keras.src.ops.numpy import log10 as log10
from keras.src.ops.numpy import logaddexp as logaddexp
from keras.src.ops.numpy import logaddexp2 as logaddexp2
from keras.src.ops.numpy import logical_and as logical_and
from keras.src.ops.numpy import logical_not as logical_not
from keras.src.ops.numpy import logical_or as logical_or
from keras.src.ops.numpy import logical_xor as logical_xor
from keras.src.ops.numpy import logspace as logspace
from keras.src.ops.numpy import matmul as matmul
from keras.src.ops.numpy import max as max
from keras.src.ops.numpy import maximum as maximum
from keras.src.ops.numpy import mean as mean
from keras.src.ops.numpy import median as median
from keras.src.ops.numpy import meshgrid as meshgrid
from keras.src.ops.numpy import min as min
from keras.src.ops.numpy import minimum as minimum
from keras.src.ops.numpy import mod as mod
from keras.src.ops.numpy import moveaxis as moveaxis
from keras.src.ops.numpy import multiply as multiply
from keras.src.ops.numpy import nan_to_num as nan_to_num
from keras.src.ops.numpy import ndim as ndim
from keras.src.ops.numpy import negative as negative
from keras.src.ops.numpy import nextafter as nextafter
from keras.src.ops.numpy import nonzero as nonzero
from keras.src.ops.numpy import not_equal as not_equal
from keras.src.ops.numpy import ones as ones
from keras.src.ops.numpy import ones_like as ones_like
from keras.src.ops.numpy import outer as outer
from keras.src.ops.numpy import pad as pad
from keras.src.ops.numpy import power as power
from keras.src.ops.numpy import prod as prod
from keras.src.ops.numpy import quantile as quantile
from keras.src.ops.numpy import ravel as ravel
from keras.src.ops.numpy import real as real
from keras.src.ops.numpy import reciprocal as reciprocal
from keras.src.ops.numpy import repeat as repeat
from keras.src.ops.numpy import reshape as reshape
from keras.src.ops.numpy import right_shift as right_shift
from keras.src.ops.numpy import roll as roll
from keras.src.ops.numpy import rot90 as rot90
from keras.src.ops.numpy import round as round
from keras.src.ops.numpy import searchsorted as searchsorted
from keras.src.ops.numpy import select as select
from keras.src.ops.numpy import sign as sign
from keras.src.ops.numpy import signbit as signbit
from keras.src.ops.numpy import sin as sin
from keras.src.ops.numpy import sinh as sinh
from keras.src.ops.numpy import size as size
from keras.src.ops.numpy import slogdet as slogdet
from keras.src.ops.numpy import sort as sort
from keras.src.ops.numpy import split as split
from keras.src.ops.numpy import sqrt as sqrt
from keras.src.ops.numpy import square as square
from keras.src.ops.numpy import squeeze as squeeze
from keras.src.ops.numpy import stack as stack
from keras.src.ops.numpy import std as std
from keras.src.ops.numpy import subtract as subtract
from keras.src.ops.numpy import sum as sum
from keras.src.ops.numpy import swapaxes as swapaxes
from keras.src.ops.numpy import take as take
from keras.src.ops.numpy import take_along_axis as take_along_axis
from keras.src.ops.numpy import tan as tan
from keras.src.ops.numpy import tanh as tanh
from keras.src.ops.numpy import tensordot as tensordot
from keras.src.ops.numpy import tile as tile
from keras.src.ops.numpy import trace as trace
from keras.src.ops.numpy import transpose as transpose
from keras.src.ops.numpy import trapezoid as trapezoid
from keras.src.ops.numpy import tri as tri
from keras.src.ops.numpy import tril as tril
from keras.src.ops.numpy import triu as triu
from keras.src.ops.numpy import true_divide as true_divide
from keras.src.ops.numpy import trunc as trunc
from keras.src.ops.numpy import unravel_index as unravel_index
from keras.src.ops.numpy import vander as vander
from keras.src.ops.numpy import var as var
from keras.src.ops.numpy import vdot as vdot
from keras.src.ops.numpy import vectorize as vectorize
from keras.src.ops.numpy import view as view
from keras.src.ops.numpy import vstack as vstack
from keras.src.ops.numpy import where as where
from keras.src.ops.numpy import zeros as zeros
from keras.src.ops.numpy import zeros_like as zeros_like
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/ops/image/__init__.py | keras/api/ops/image/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform as affine_transform
from keras.src.ops.image import crop_images as crop_images
from keras.src.ops.image import elastic_transform as elastic_transform
from keras.src.ops.image import extract_patches as extract_patches
from keras.src.ops.image import extract_patches_3d as extract_patches_3d
from keras.src.ops.image import gaussian_blur as gaussian_blur
from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb
from keras.src.ops.image import map_coordinates as map_coordinates
from keras.src.ops.image import pad_images as pad_images
from keras.src.ops.image import perspective_transform as perspective_transform
from keras.src.ops.image import resize as resize
from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv
from keras.src.ops.image import scale_and_translate as scale_and_translate
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/ops/linalg/__init__.py | keras/api/ops/linalg/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import cholesky_inverse as cholesky_inverse
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import jvp as jvp
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/ops/numpy/__init__.py | keras/api/ops/numpy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.numpy import abs as abs
from keras.src.ops.numpy import absolute as absolute
from keras.src.ops.numpy import add as add
from keras.src.ops.numpy import all as all
from keras.src.ops.numpy import amax as amax
from keras.src.ops.numpy import amin as amin
from keras.src.ops.numpy import angle as angle
from keras.src.ops.numpy import any as any
from keras.src.ops.numpy import append as append
from keras.src.ops.numpy import arange as arange
from keras.src.ops.numpy import arccos as arccos
from keras.src.ops.numpy import arccosh as arccosh
from keras.src.ops.numpy import arcsin as arcsin
from keras.src.ops.numpy import arcsinh as arcsinh
from keras.src.ops.numpy import arctan as arctan
from keras.src.ops.numpy import arctan2 as arctan2
from keras.src.ops.numpy import arctanh as arctanh
from keras.src.ops.numpy import argmax as argmax
from keras.src.ops.numpy import argmin as argmin
from keras.src.ops.numpy import argpartition as argpartition
from keras.src.ops.numpy import argsort as argsort
from keras.src.ops.numpy import array as array
from keras.src.ops.numpy import array_split as array_split
from keras.src.ops.numpy import average as average
from keras.src.ops.numpy import bartlett as bartlett
from keras.src.ops.numpy import bincount as bincount
from keras.src.ops.numpy import bitwise_and as bitwise_and
from keras.src.ops.numpy import bitwise_invert as bitwise_invert
from keras.src.ops.numpy import bitwise_left_shift as bitwise_left_shift
from keras.src.ops.numpy import bitwise_not as bitwise_not
from keras.src.ops.numpy import bitwise_or as bitwise_or
from keras.src.ops.numpy import bitwise_right_shift as bitwise_right_shift
from keras.src.ops.numpy import bitwise_xor as bitwise_xor
from keras.src.ops.numpy import blackman as blackman
from keras.src.ops.numpy import broadcast_to as broadcast_to
from keras.src.ops.numpy import cbrt as cbrt
from keras.src.ops.numpy import ceil as ceil
from keras.src.ops.numpy import clip as clip
from keras.src.ops.numpy import concatenate as concatenate
from keras.src.ops.numpy import conj as conj
from keras.src.ops.numpy import conjugate as conjugate
from keras.src.ops.numpy import copy as copy
from keras.src.ops.numpy import corrcoef as corrcoef
from keras.src.ops.numpy import correlate as correlate
from keras.src.ops.numpy import cos as cos
from keras.src.ops.numpy import cosh as cosh
from keras.src.ops.numpy import count_nonzero as count_nonzero
from keras.src.ops.numpy import cross as cross
from keras.src.ops.numpy import cumprod as cumprod
from keras.src.ops.numpy import cumsum as cumsum
from keras.src.ops.numpy import deg2rad as deg2rad
from keras.src.ops.numpy import diag as diag
from keras.src.ops.numpy import diagflat as diagflat
from keras.src.ops.numpy import diagonal as diagonal
from keras.src.ops.numpy import diff as diff
from keras.src.ops.numpy import digitize as digitize
from keras.src.ops.numpy import divide as divide
from keras.src.ops.numpy import divide_no_nan as divide_no_nan
from keras.src.ops.numpy import dot as dot
from keras.src.ops.numpy import einsum as einsum
from keras.src.ops.numpy import empty as empty
from keras.src.ops.numpy import empty_like as empty_like
from keras.src.ops.numpy import equal as equal
from keras.src.ops.numpy import exp as exp
from keras.src.ops.numpy import exp2 as exp2
from keras.src.ops.numpy import expand_dims as expand_dims
from keras.src.ops.numpy import expm1 as expm1
from keras.src.ops.numpy import eye as eye
from keras.src.ops.numpy import flip as flip
from keras.src.ops.numpy import floor as floor
from keras.src.ops.numpy import floor_divide as floor_divide
from keras.src.ops.numpy import full as full
from keras.src.ops.numpy import full_like as full_like
from keras.src.ops.numpy import gcd as gcd
from keras.src.ops.numpy import get_item as get_item
from keras.src.ops.numpy import greater as greater
from keras.src.ops.numpy import greater_equal as greater_equal
from keras.src.ops.numpy import hamming as hamming
from keras.src.ops.numpy import hanning as hanning
from keras.src.ops.numpy import heaviside as heaviside
from keras.src.ops.numpy import histogram as histogram
from keras.src.ops.numpy import hstack as hstack
from keras.src.ops.numpy import hypot as hypot
from keras.src.ops.numpy import identity as identity
from keras.src.ops.numpy import imag as imag
from keras.src.ops.numpy import inner as inner
from keras.src.ops.numpy import isclose as isclose
from keras.src.ops.numpy import isfinite as isfinite
from keras.src.ops.numpy import isin as isin
from keras.src.ops.numpy import isinf as isinf
from keras.src.ops.numpy import isnan as isnan
from keras.src.ops.numpy import isneginf as isneginf
from keras.src.ops.numpy import isposinf as isposinf
from keras.src.ops.numpy import isreal as isreal
from keras.src.ops.numpy import kaiser as kaiser
from keras.src.ops.numpy import kron as kron
from keras.src.ops.numpy import lcm as lcm
from keras.src.ops.numpy import ldexp as ldexp
from keras.src.ops.numpy import left_shift as left_shift
from keras.src.ops.numpy import less as less
from keras.src.ops.numpy import less_equal as less_equal
from keras.src.ops.numpy import linspace as linspace
from keras.src.ops.numpy import log as log
from keras.src.ops.numpy import log1p as log1p
from keras.src.ops.numpy import log2 as log2
from keras.src.ops.numpy import log10 as log10
from keras.src.ops.numpy import logaddexp as logaddexp
from keras.src.ops.numpy import logaddexp2 as logaddexp2
from keras.src.ops.numpy import logical_and as logical_and
from keras.src.ops.numpy import logical_not as logical_not
from keras.src.ops.numpy import logical_or as logical_or
from keras.src.ops.numpy import logical_xor as logical_xor
from keras.src.ops.numpy import logspace as logspace
from keras.src.ops.numpy import matmul as matmul
from keras.src.ops.numpy import max as max
from keras.src.ops.numpy import maximum as maximum
from keras.src.ops.numpy import mean as mean
from keras.src.ops.numpy import median as median
from keras.src.ops.numpy import meshgrid as meshgrid
from keras.src.ops.numpy import min as min
from keras.src.ops.numpy import minimum as minimum
from keras.src.ops.numpy import mod as mod
from keras.src.ops.numpy import moveaxis as moveaxis
from keras.src.ops.numpy import multiply as multiply
from keras.src.ops.numpy import nan_to_num as nan_to_num
from keras.src.ops.numpy import ndim as ndim
from keras.src.ops.numpy import negative as negative
from keras.src.ops.numpy import nextafter as nextafter
from keras.src.ops.numpy import nonzero as nonzero
from keras.src.ops.numpy import not_equal as not_equal
from keras.src.ops.numpy import ones as ones
from keras.src.ops.numpy import ones_like as ones_like
from keras.src.ops.numpy import outer as outer
from keras.src.ops.numpy import pad as pad
from keras.src.ops.numpy import power as power
from keras.src.ops.numpy import prod as prod
from keras.src.ops.numpy import quantile as quantile
from keras.src.ops.numpy import ravel as ravel
from keras.src.ops.numpy import real as real
from keras.src.ops.numpy import reciprocal as reciprocal
from keras.src.ops.numpy import repeat as repeat
from keras.src.ops.numpy import reshape as reshape
from keras.src.ops.numpy import right_shift as right_shift
from keras.src.ops.numpy import roll as roll
from keras.src.ops.numpy import rot90 as rot90
from keras.src.ops.numpy import round as round
from keras.src.ops.numpy import searchsorted as searchsorted
from keras.src.ops.numpy import select as select
from keras.src.ops.numpy import sign as sign
from keras.src.ops.numpy import signbit as signbit
from keras.src.ops.numpy import sin as sin
from keras.src.ops.numpy import sinh as sinh
from keras.src.ops.numpy import size as size
from keras.src.ops.numpy import slogdet as slogdet
from keras.src.ops.numpy import sort as sort
from keras.src.ops.numpy import split as split
from keras.src.ops.numpy import sqrt as sqrt
from keras.src.ops.numpy import square as square
from keras.src.ops.numpy import squeeze as squeeze
from keras.src.ops.numpy import stack as stack
from keras.src.ops.numpy import std as std
from keras.src.ops.numpy import subtract as subtract
from keras.src.ops.numpy import sum as sum
from keras.src.ops.numpy import swapaxes as swapaxes
from keras.src.ops.numpy import take as take
from keras.src.ops.numpy import take_along_axis as take_along_axis
from keras.src.ops.numpy import tan as tan
from keras.src.ops.numpy import tanh as tanh
from keras.src.ops.numpy import tensordot as tensordot
from keras.src.ops.numpy import tile as tile
from keras.src.ops.numpy import trace as trace
from keras.src.ops.numpy import transpose as transpose
from keras.src.ops.numpy import trapezoid as trapezoid
from keras.src.ops.numpy import tri as tri
from keras.src.ops.numpy import tril as tril
from keras.src.ops.numpy import triu as triu
from keras.src.ops.numpy import true_divide as true_divide
from keras.src.ops.numpy import trunc as trunc
from keras.src.ops.numpy import unravel_index as unravel_index
from keras.src.ops.numpy import vander as vander
from keras.src.ops.numpy import var as var
from keras.src.ops.numpy import vdot as vdot
from keras.src.ops.numpy import vectorize as vectorize
from keras.src.ops.numpy import view as view
from keras.src.ops.numpy import vstack as vstack
from keras.src.ops.numpy import where as where
from keras.src.ops.numpy import zeros as zeros
from keras.src.ops.numpy import zeros_like as zeros_like
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/ops/nn/__init__.py | keras/api/ops/nn/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import adaptive_average_pool as adaptive_average_pool
from keras.src.ops.nn import adaptive_max_pool as adaptive_max_pool
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
from keras.src.ops.nn import unfold as unfold
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/quantizers/__init__.py | keras/api/quantizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.gptq_config import GPTQConfig as GPTQConfig
from keras.src.quantizers.quantization_config import (
Float8QuantizationConfig as Float8QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
Int4QuantizationConfig as Int4QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
Int8QuantizationConfig as Int8QuantizationConfig,
)
from keras.src.quantizers.quantization_config import (
QuantizationConfig as QuantizationConfig,
)
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import pack_int4 as pack_int4
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
from keras.src.quantizers.quantizers import unpack_int4 as unpack_int4
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/functional_api.py | guides/functional_api.py | """
Title: The Functional API
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/12
Description: Complete guide to the functional API.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras
from keras import layers
from keras import ops
"""
## Introduction
The Keras *functional API* is a way to create models that are more flexible
than the `keras.Sequential` API. The functional API can handle models
with non-linear topology, shared layers, and even multiple inputs or outputs.
The main idea is that a deep learning model is usually
a directed acyclic graph (DAG) of layers.
So the functional API is a way to build *graphs of layers*.
Consider the following model:
<div class="k-default-codeblock">
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
</div>
This is a basic graph with three layers.
To build this model using the functional API, start by creating an input node:
"""
inputs = keras.Input(shape=(784,))
"""
The shape of the data is set as a 784-dimensional vector.
The batch size is always omitted since only the shape of each sample is specified.
If, for example, you have an image input with a shape of `(32, 32, 3)`,
you would use:
"""
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
"""
The `inputs` that is returned contains information about the shape and `dtype`
of the input data that you feed to your model.
Here's the shape:
"""
inputs.shape
"""
Here's the dtype:
"""
inputs.dtype
"""
You create a new node in the graph of layers by calling a layer on this `inputs`
object:
"""
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
"""
The "layer call" action is like drawing an arrow from "inputs" to this layer
you created.
You're "passing" the inputs to the `dense` layer, and you get `x` as the output.
Let's add a few more layers to the graph of layers:
"""
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
"""
At this point, you can create a `Model` by specifying its inputs and outputs
in the graph of layers:
"""
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
"""
Let's check out what the model summary looks like:
"""
model.summary()
"""
You can also plot the model as a graph:
"""
keras.utils.plot_model(model, "my_first_model.png")
"""
And, optionally, display the input and output shapes of each layer
in the plotted graph:
"""
keras.utils.plot_model(
model, "my_first_model_with_shape_info.png", show_shapes=True
)
"""
This figure and the code are almost identical. In the code version,
the connection arrows are replaced by the call operation.
A "graph of layers" is an intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirrors this.
"""
"""
## Training, evaluation, and inference
Training, evaluation, and inference work in exactly the same way for models
built using the functional API as for `Sequential` models.
The `Model` class offers a built-in training loop (the `fit()` method)
and a built-in evaluation loop (the `evaluate()` method). Note
that you can easily [customize these loops](/guides/customizing_what_happens_in_fit/)
to implement training routines beyond supervised learning
(e.g. [GANs](https://keras.io/examples/generative/dcgan_overriding_train_step/)).
Here, load the MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
then evaluate the model on the test data:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(
x_train, y_train, batch_size=64, epochs=2, validation_split=0.2
)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
"""
For further reading, see the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
## Save and serialize
Saving the model and serialization work the same way for models built using
the functional API as they do for `Sequential` models. The standard way
to save a functional model is to call `model.save()`
to save the entire model as a single file. You can later recreate the same model
from this file, even if the code that built the model is no longer available.
This saved file includes the:
- model architecture
- model weight values (that were learned during training)
- model training config, if any (as passed to `compile()`)
- optimizer and its state, if any (to restart training where you left off)
"""
model.save("my_model.keras")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("my_model.keras")
"""
For details, read the model [serialization & saving](
/guides/serialization_and_saving/) guide.
"""
"""
## Use the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single
graph of layers can be used to generate multiple models.
In the example below, you use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
"""
Here, the decoding architecture is strictly symmetrical
to the encoding architecture, so the output shape is the same as
the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer,
and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
"""
"""
## All models are callable, just like layers
You can treat any model as if it were a layer by invoking it on an `Input` or
on the output of another layer. By calling a model you aren't just reusing
the architecture of the model, you're also reusing its weights.
To see this in action, here's a different take on the autoencoder example that
creates an encoder model, a decoder model, and chains them in two calls
to obtain the autoencoder model:
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
"""
As you can see, the model can be nested: a model can contain sub-models
(since a model is just like a layer).
A common use case for model nesting is *ensembling*.
For example, here's how to ensemble a set of models into a single model
that averages their predictions:
"""
def get_model():
    # Build a minimal one-output linear model over 128-dimensional inputs.
    model_input = keras.Input(shape=(128,))
    model_output = layers.Dense(1)(model_input)
    return keras.Model(model_input, model_output)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
"""
## Manipulate complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs.
This cannot be handled with the `Sequential` API.
For example, if you're building a system for ranking customer issue tickets by
priority and routing them to the correct department,
then the model will have three inputs:
- the title of the ticket (text input),
- the text body of the ticket (text input), and
- any tags added by the user (categorical input)
This model will have two outputs:
- the priority score between 0 and 1 (scalar sigmoid output), and
- the department that should handle the ticket (softmax output
over the set of departments).
You can build this model in a few lines with the functional API:
"""
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(
shape=(None,), name="body"
) # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs={"priority": priority_pred, "department": department_pred},
)
"""
Now plot the model:
"""
keras.utils.plot_model(
model, "multi_input_and_output_model.png", show_shapes=True
)
"""
When compiling this model, you can assign different losses to each output.
You can even assign different weights to each loss -- to modulate
their contribution to the total training loss.
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
"""
Since the output layers have different names, you could also specify
the losses and loss weights with the corresponding layer names:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
"""
Train the model by passing lists of NumPy arrays of inputs and targets:
"""
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
"""
When calling fit with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
For more detailed explanation, refer to the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
### A toy ResNet model
In addition to models with multiple inputs and outputs,
the functional API makes it easy to manipulate non-linear connectivity
topologies -- these are models with layers that are not connected sequentially,
which the `Sequential` API cannot handle.
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this:
"""
# A non-sequential graph: three conv blocks with residual (skip) connections.
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
# `padding="same"` keeps spatial dims so the residual add shapes match.
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
# Residual connection: element-wise add of block input and block output.
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
"""
Plot the model:
"""
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
"""
Now train the model:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Scale pixel values to [0, 1] and one-hot encode the 10 class labels.
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    # `from_logits=True` because the final Dense layer has no softmax.
    loss=keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(
    x_train[:1000],
    y_train[:1000],
    batch_size=64,
    epochs=1,
    validation_split=0.2,
)
"""
## Shared layers
Another good use for the functional API are models that use *shared layers*.
Shared layers are layer instances that are reused multiple times in the same model --
they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs from similar spaces
(say, two different pieces of text that feature similar vocabulary).
They enable sharing of information across these different inputs,
and they make it possible to train such a model on less data.
If a given word is seen in one of the inputs,
that will benefit the processing of all inputs that pass through the shared layer.
To share a layer in the functional API, call the same layer instance multiple times.
For instance, here's an `Embedding` layer shared across two different text inputs:
"""
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer instance to encode both inputs -- both calls
# share the single set of embedding weights.
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""
## Extract and reuse nodes in the graph of layers
Because the graph of layers you are manipulating is a static data structure,
it can be accessed and inspected. And this is how you are able to plot
functional models as images.
This also means that you can access the activations of intermediate layers
("nodes" in the graph) and reuse them elsewhere --
which is very useful for something like feature extraction.
Let's look at an example. This is a VGG19 model with weights pretrained on ImageNet:
"""
# Downloads the ImageNet-pretrained weights on first use.
vgg19 = keras.applications.VGG19()
"""
And these are the intermediate activations of the model,
obtained by querying the graph data structure:
"""
# One symbolic output tensor per layer in the graph.
features_list = [layer.output for layer in vgg19.layers]
"""
Use these features to create a new feature-extraction model that returns
the values of the intermediate layer activations:
"""
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
# Returns a list with one activation tensor per VGG19 layer.
extracted_features = feat_extraction_model(img)
"""
This comes in handy for tasks like
[neural style transfer](https://keras.io/examples/generative/neural_style_transfer/),
among other things.
"""
"""
## Extend the API using custom layers
`keras` includes a wide range of built-in layers, for example:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
But if you don't find what you need, it's easy to extend the API by creating
your own layers. All layers subclass the `Layer` class and implement:
- `call` method, that specifies the computation done by the layer.
- `build` method, that creates the weights of the layer (this is just a style
convention since you can create weights in `__init__`, as well).
To learn more about creating layers from scratch, read
[custom layers and models](/guides/making_new_layers_and_models_via_subclassing) guide.
The following is a basic implementation of `keras.layers.Dense`:
"""
class CustomDense(layers.Layer):
    """A minimal reimplementation of `keras.layers.Dense`.

    Computes `outputs = inputs @ w + b`, creating the weights lazily in
    `build()` once the input's last dimension is known.
    """
    def __init__(self, units=32):
        super().__init__()
        # Number of output features.
        self.units = units
    def build(self, input_shape):
        # Kernel with shape (input_dim, units).
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        # Bias: one value per output unit.
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return ops.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
"""
For serialization support in your custom layer, define a `get_config()`
method that returns the constructor arguments of the layer instance:
"""
class CustomDense(layers.Layer):
    """Same layer as above, plus `get_config()` for serialization support."""
    def __init__(self, units=32):
        super().__init__()
        self.units = units
    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return ops.matmul(inputs, self.w) + self.b
    def get_config(self):
        # Constructor kwargs needed to re-create this layer from config.
        return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
# Custom classes must be passed explicitly when deserializing the config.
new_model = keras.Model.from_config(
    config, custom_objects={"CustomDense": CustomDense}
)
"""
Optionally, implement the class method `from_config(cls, config)` which is used
when recreating a layer instance given its config dictionary.
The default implementation of `from_config` is:
```python
def from_config(cls, config):
return cls(**config)
```
"""
"""
## When to use the functional API
Should you use the Keras functional API to create a new model,
or just subclass the `Model` class directly? In general, the functional API
is higher-level, easier and safer, and has a number of
features that subclassed models do not support.
However, model subclassing provides greater flexibility when building models
that are not easily expressible as directed acyclic graphs of layers.
For example, you could not implement a Tree-RNN with the functional API
and would have to subclass `Model` directly.
For an in-depth look at the differences between the functional API and
model subclassing, read
[What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
### Functional API strengths:
The following properties are also true for Sequential models
(which are also data structures), but are not true for subclassed models
(which are Python bytecode, not data structures).
#### Less verbose
There is no `super().__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
class MLP(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense_1 = layers.Dense(64, activation='relu')
self.dense_2 = layers.Dense(10)
def call(self, inputs):
x = self.dense_1(inputs)
return self.dense_2(x)
# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(ops.zeros((1, 32)))
```
#### Model validation while defining its connectivity graph
In the functional API, the input specification (shape and dtype) is created
in advance (using `Input`). Every time you call a layer,
the layer checks that the specification passed to it matches its assumptions,
and it will raise a helpful error message if not.
This guarantees that any model you can build with the functional API will run.
All debugging -- other than convergence-related debugging --
happens statically during the model construction and not at execution time.
This is similar to type checking in a compiler.
#### A functional model is plottable and inspectable
You can plot the model as a graph, and you can easily access intermediate nodes
in this graph. For example, to extract and reuse the activations of intermediate
layers (as seen in a previous example):
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### A functional model can be serialized or cloned
Because a functional model is a data structure rather than a piece of code,
it is safely serializable and can be saved as a single file
that allows you to recreate the exact same model
without having access to any of the original code.
See the [serialization & saving guide](/guides/serialization_and_saving/).
To serialize a subclassed model, it is necessary for the implementer
to specify a `get_config()`
and `from_config()` method at the model level.
### Functional API weakness:
#### It does not support dynamic architectures
The functional API treats models as DAGs of layers.
This is true for most deep learning architectures, but not all -- for example,
recursive networks or Tree RNNs do not follow this assumption and cannot
be implemented in the functional API.
"""
"""
## Mix-and-match API styles
Choosing between the functional API or Model subclassing isn't a
binary decision that restricts you into one category of models.
All models in the `keras` API can interact with each other, whether they're
`Sequential` models, functional models, or subclassed models that are written
from scratch.
You can always use a functional model or `Sequential` model
as part of a subclassed model or layer:
"""
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
    """A hand-rolled RNN that embeds the Functional `model` as its classifier."""
    def __init__(self):
        super().__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation="tanh")
        self.projection_2 = layers.Dense(units=units, activation="tanh")
        # Our previously-defined Functional model
        self.classifier = model
    def call(self, inputs):
        outputs = []
        # Zero initial hidden state; needs a static batch size (inputs.shape[0]).
        state = ops.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            # Simple recurrence: y_t = proj1(x_t) + proj2(state).
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        # Stack per-timestep outputs back into (batch, timesteps, units).
        features = ops.stack(outputs, axis=1)
        print(features.shape)
        return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, timesteps, input_dim)))
"""
You can use any subclassed layer or model in the functional API
as long as it implements a `call` method that follows one of the following patterns:
- `call(self, inputs, **kwargs)` --
Where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors),
and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` --
Where `training` is a boolean indicating whether the layer should behave
in training mode and inference mode.
- `call(self, inputs, mask=None, **kwargs)` --
Where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` --
Of course, you can have both masking and training-specific behavior at the same time.
Additionally, if you implement the `get_config` method on your custom Layer or model,
the functional models you create will still be serializable and cloneable.
Here's a quick example of a custom RNN, written from scratch,
being used in a functional model:
"""
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
    """Same custom RNN as above, with a plain Dense classifier head."""
    def __init__(self):
        super().__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation="tanh")
        self.projection_2 = layers.Dense(units=units, activation="tanh")
        self.classifier = layers.Dense(1)
    def call(self, inputs):
        outputs = []
        # Zero initial state; `inputs.shape[0]` must be static here.
        state = ops.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = ops.stack(outputs, axis=1)
        return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
# Standalone sanity call with batch size 1 and shape (1, timesteps, input_dim).
_ = rnn_model(ops.zeros((1, 10, 5)))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/writing_a_custom_training_loop_in_torch.py | guides/writing_a_custom_training_loop_in_torch.py | """
Title: Writing a training loop from scratch in PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/25
Last modified: 2023/06/25
Description: Writing low-level training & evaluation loops in PyTorch.
Accelerator: None
"""
"""
## Setup
"""
import os
# This guide can only be run with the torch backend.
# The env var must be set before `keras` is imported below.
os.environ["KERAS_BACKEND"] = "torch"
import torch
import keras
import numpy as np
"""
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](https://keras.io/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`.
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
"""
"""
## A first end-to-end example
To write a custom training loop, we need the following ingredients:
- A model to train, of course.
- An optimizer. You could either use a `keras.optimizers` optimizer,
or a native PyTorch optimizer from `torch.optim`.
- A loss function. You could either use a `keras.losses` loss,
or a native PyTorch loss from `torch.nn`.
- A dataset. You could use any format: a `tf.data.Dataset`,
a PyTorch `DataLoader`, a Python generator, etc.
Let's line them up. We'll use torch-native objects in each case --
except, of course, for the Keras model.
First, let's get the model and the MNIST dataset:
"""
# Let's consider a simple MNIST model
def get_model():
    """Build a small MLP classifier for flattened 28x28 MNIST digits."""
    inputs = keras.Input(shape=(784,), name="digits")
    x1 = keras.layers.Dense(64, activation="relu")(inputs)
    x2 = keras.layers.Dense(64, activation="relu")(x1)
    # No softmax: the loss functions below consume raw logits.
    outputs = keras.layers.Dense(10, name="predictions")(x2)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
# Load the MNIST dataset and put it in a torch DataLoader
# Prepare the training dataset.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Flatten 28x28 images to 784-float vectors; one-hot encode the labels.
x_train = np.reshape(x_train, (-1, 784)).astype("float32")
x_test = np.reshape(x_test, (-1, 784)).astype("float32")
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Create torch Datasets
train_dataset = torch.utils.data.TensorDataset(
    torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_dataset = torch.utils.data.TensorDataset(
    torch.from_numpy(x_val), torch.from_numpy(y_val)
)
# Create DataLoaders for the Datasets
train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False
)
"""
Next, here's our PyTorch optimizer and our PyTorch loss function:
"""
# Instantiate a torch optimizer
model = get_model()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Instantiate a torch loss function
# (CrossEntropyLoss expects logits, which get_model() produces).
loss_fn = torch.nn.CrossEntropyLoss()
"""
Let's train our model using mini-batch gradient with a custom training loop.
Calling `loss.backward()` on a loss tensor triggers backpropagation.
Once that's done, your optimizer is magically aware of the gradients for each variable
and can update its variables, which is done via `optimizer.step()`.
Tensors, variables, optimizers are all interconnected to one another via hidden global state.
Also, don't forget to call `model.zero_grad()` before `loss.backward()`, or you won't
get the right gradients for your variables.
Here's our training loop, step by step:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we call the model on the input data to retrieve the predictions,
then we use them to compute a loss value
- We call `loss.backward()` to run backpropagation, which computes the
gradients of the model's weights with regard to the loss and stores them
on each parameter's `.grad` attribute
- Finally, we use the optimizer to update the weights of the model based on the
gradients
"""
epochs = 3
for epoch in range(epochs):
    for step, (inputs, targets) in enumerate(train_dataloader):
        # Forward pass
        logits = model(inputs)
        # Torch losses take (input, target) order, i.e. (y_pred, y_true).
        loss = loss_fn(logits, targets)
        # Backward pass: clear stale gradients first, then backpropagate.
        model.zero_grad()
        loss.backward()
        # Optimizer variable updates
        optimizer.step()
        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")
"""
As an alternative, let's look at what the loop looks like when using a Keras optimizer
and a Keras loss function.
Important differences:
- You retrieve the gradients for the variables via `v.value.grad`,
called on each trainable variable.
- You update your variables via `optimizer.apply()`, which must be
called in a `torch.no_grad()` scope.
**Also, a big gotcha:** while all NumPy/TensorFlow/JAX/Keras APIs
as well as Python `unittest` APIs use the argument order convention
`fn(y_true, y_pred)` (reference values first, predicted values second),
PyTorch actually uses `fn(y_pred, y_true)` for its losses.
So make sure to invert the order of `logits` and `targets`.
"""
model = get_model()
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")
    for step, (inputs, targets) in enumerate(train_dataloader):
        # Forward pass
        logits = model(inputs)
        # Keras losses use (y_true, y_pred) order -- inverted vs. torch.
        loss = loss_fn(targets, logits)
        # Backward pass
        model.zero_grad()
        trainable_weights = [v for v in model.trainable_weights]
        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()
        # Each Keras variable's underlying torch gradient is on `v.value.grad`.
        gradients = [v.value.grad for v in trainable_weights]
        # Update weights -- optimizer.apply() must run outside autograd.
        with torch.no_grad():
            optimizer.apply(gradients, trainable_weights)
        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")
"""
## Low-level handling of metrics
Let's add metrics monitoring to this basic training loop.
You can readily reuse built-in Keras metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_state()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `CategoricalAccuracy` on training and
validation data at the end of each epoch:
"""
# Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
"""
Here's our training & evaluation loop:
"""
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")
    for step, (inputs, targets) in enumerate(train_dataloader):
        # Forward pass
        logits = model(inputs)
        loss = loss_fn(targets, logits)
        # Backward pass
        model.zero_grad()
        trainable_weights = [v for v in model.trainable_weights]
        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()
        gradients = [v.value.grad for v in trainable_weights]
        # Update weights
        with torch.no_grad():
            optimizer.apply(gradients, trainable_weights)
        # Update training metric.
        train_acc_metric.update_state(targets, logits)
        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")
    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print(f"Training acc over epoch: {float(train_acc):.4f}")
    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_state()
    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataloader:
        # `training=False` runs the model in inference mode.
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric.update_state(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_state()
    print(f"Validation acc: {float(val_acc):.4f}")
"""
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values are available via the property `model.losses`
at the end of the forward pass.
If you want to be using these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, that creates an activity regularization loss:
"""
class ActivityRegularizationLayer(keras.layers.Layer):
    """Identity layer that adds an activity-regularization loss on its input."""
    def call(self, inputs):
        # Tracked via Layer.add_loss(); surfaces later in `model.losses`.
        self.add_loss(1e-2 * torch.sum(inputs))
        return inputs
"""
Let's build a really simple model that uses it:
"""
# Same MLP as get_model(), with an activity-regularization layer inserted.
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what our training loop should look like now:
"""
# Get a fresh model
# NOTE(review): this discards the model built just above with the
# `ActivityRegularizationLayer`, so `model.losses` stays empty and the
# `if model.losses:` branch below never fires -- presumably the regularized
# model was meant to be used here; confirm against the rendered guide.
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")
    for step, (inputs, targets) in enumerate(train_dataloader):
        # Forward pass
        logits = model(inputs)
        loss = loss_fn(targets, logits)
        if model.losses:
            # Add the regularization losses tracked during the forward pass.
            # Built-in `sum()` handles any number of tracked scalar losses,
            # whereas `torch.sum(*model.losses)` would misinterpret a second
            # tracked loss as torch.sum's `dim` argument.
            loss = loss + sum(model.losses)
        # Backward pass
        model.zero_grad()
        trainable_weights = list(model.trainable_weights)
        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()
        gradients = [v.value.grad for v in trainable_weights]
        # Update weights
        with torch.no_grad():
            optimizer.apply(gradients, trainable_weights)
        # Update training metric.
        train_acc_metric.update_state(targets, logits)
        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")
    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print(f"Training acc over epoch: {float(train_acc):.4f}")
    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_state()
    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataloader:
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric.update_state(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_state()
    print(f"Validation acc: {float(val_acc):.4f}")
"""
That's it!
"""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/custom_train_step_in_torch.py | guides/custom_train_step_in_torch.py | """
Title: Customizing what happens in `fit()` with PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/27
Last modified: 2023/06/27
Description: Overriding the training step of the Model class with PyTorch.
Accelerator: GPU
"""
"""
## Introduction
When you're doing supervised learning, you can use `fit()` and everything works
smoothly.
When you need to take control of every little detail, you can write your own training
loop entirely from scratch.
But what if you need a custom training algorithm, but you still want to benefit from
the convenient features of `fit()`, such as callbacks, built-in distribution support,
or step fusing?
A core principle of Keras is **progressive disclosure of complexity**. You should
always be able to get into lower-level workflows in a gradual way. You shouldn't fall
off a cliff if the high-level functionality doesn't exactly match your use case. You
should be able to gain more control over the small details while retaining a
commensurate amount of high-level convenience.
When you need to customize what `fit()` does, you should **override the training step
function of the `Model` class**. This is the function that is called by `fit()` for
every batch of data. You will then be able to call `fit()` as usual -- and it will be
running your own learning algorithm.
Note that this pattern does not prevent you from building models with the Functional
API. You can do this whether you're building `Sequential` models, Functional API
models, or subclassed models.
Let's see how that works.
"""
"""
## Setup
"""
import os
# This guide can only be run with the torch backend.
# The env var must be set before `keras` is imported below.
os.environ["KERAS_BACKEND"] = "torch"
import torch
import keras
from keras import layers
import numpy as np
"""
## A first simple example
Let's start from a simple example:
- We create a new class that subclasses `keras.Model`.
- We just override the method `train_step(self, data)`.
- We return a dictionary mapping metric names (including the loss) to their current
value.
The input argument `data` is what gets passed to fit as training data:
- If you pass NumPy arrays, by calling `fit(x, y, ...)`, then `data` will be the tuple
`(x, y)`
- If you pass a `torch.utils.data.DataLoader` or a `tf.data.Dataset`,
by calling `fit(dataset, ...)`, then `data` will be what gets yielded
by `dataset` at each batch.
In the body of the `train_step()` method, we implement a regular training update,
similar to what you are already familiar with. Importantly, **we compute the loss via
`self.compute_loss()`**, which wraps the loss(es) function(s) that were passed to
`compile()`.
Similarly, we call `metric.update_state(y, y_pred)` on metrics from `self.metrics`,
to update the state of the metrics that were passed in `compile()`,
and we query results from `self.metrics` at the end to retrieve their current value.
"""
class CustomModel(keras.Model):
    """Model whose `train_step` runs a manual torch backward/update pass."""
    def train_step(self, data):
        """Run one optimization step; return {metric_name: current value}."""
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        # Call torch.nn.Module.zero_grad() to clear the leftover gradients
        # for the weights from the previous train step.
        self.zero_grad()
        # Compute loss
        y_pred = self(x, training=True)  # Forward pass
        # compute_loss() wraps the loss(es) configured via compile().
        loss = self.compute_loss(y=y, y_pred=y_pred)
        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()
        trainable_weights = [v for v in self.trainable_weights]
        gradients = [v.value.grad for v in trainable_weights]
        # Update weights -- optimizer.apply() must run outside autograd.
        with torch.no_grad():
            self.optimizer.apply(gradients, trainable_weights)
        # Update metrics (includes the metric that tracks the loss)
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(loss)
            else:
                metric.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
"""
Let's try this out:
"""
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# Just use `fit` as usual -- the custom `train_step` runs under the hood.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
"""
## Going lower-level
Naturally, you could just skip passing a loss function in `compile()`, and instead do
everything *manually* in `train_step`. Likewise for metrics.
Here's a lower-level example, that only uses `compile()` to configure the optimizer:
- We start by creating `Metric` instances to track our loss and a MAE score (in `__init__()`).
- We implement a custom `train_step()` that updates the state of these metrics
(by calling `update_state()` on them), then query them (via `result()`) to return their current average value,
to be displayed by the progress bar and to be passed to any callback.
- Note that we would need to call `reset_state()` on our metrics between each epoch! Otherwise
calling `result()` would return an average since the start of training, whereas we usually work
with per-epoch averages. Thankfully, the framework can do that for us: just list any metric
you want to reset in the `metrics` property of the model. The model will call `reset_state()`
on any object listed here at the beginning of each `fit()` epoch or at the beginning of a call to
`evaluate()`.
"""
class CustomModel(keras.Model):
    """Lower-level variant: loss and metrics are managed manually in train_step.

    `compile()` is used only to configure the optimizer.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Manually-managed loss tracker, metric, and loss function.
        self.loss_tracker = keras.metrics.Mean(name="loss")
        self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae")
        self.loss_fn = keras.losses.MeanSquaredError()
    def train_step(self, data):
        x, y = data
        # Call torch.nn.Module.zero_grad() to clear the leftover gradients
        # for the weights from the previous train step.
        self.zero_grad()
        # Compute loss
        y_pred = self(x, training=True)  # Forward pass
        loss = self.loss_fn(y, y_pred)
        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()
        trainable_weights = [v for v in self.trainable_weights]
        gradients = [v.value.grad for v in trainable_weights]
        # Update weights
        with torch.no_grad():
            self.optimizer.apply(gradients, trainable_weights)
        # Compute our own metrics
        self.loss_tracker.update_state(loss)
        self.mae_metric.update_state(y, y_pred)
        return {
            "loss": self.loss_tracker.result(),
            "mae": self.mae_metric.result(),
        }
    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_state()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        return [self.loss_tracker, self.mae_metric]
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
# We don't pass a loss or metrics here.
model.compile(optimizer="adam")
# Just use `fit` as usual -- you can use callbacks, etc.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=5)
"""
## Supporting `sample_weight` & `class_weight`
You may have noticed that our first basic example didn't make any mention of sample
weighting. If you want to support the `fit()` arguments `sample_weight` and
`class_weight`, you'd simply do the following:
- Unpack `sample_weight` from the `data` argument
- Pass it to `compute_loss` & `update_state` (of course, you could also just apply
it manually if you don't rely on `compile()` for losses & metrics)
- That's it.
"""
class CustomModel(keras.Model):
    """Keras model whose custom PyTorch train step supports `sample_weight`."""

    def train_step(self, data):
        """Run one optimization step, forwarding `sample_weight` if present."""
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            sample_weight = None
            x, y = data

        # Call torch.nn.Module.zero_grad() to clear the leftover gradients
        # for the weights from the previous train step.
        self.zero_grad()

        # Compute loss
        y_pred = self(x, training=True)  # Forward pass
        loss = self.compute_loss(
            y=y,
            y_pred=y_pred,
            sample_weight=sample_weight,
        )

        # Call torch.Tensor.backward() on the loss to compute gradients
        # for the weights.
        loss.backward()

        trainable_weights = [v for v in self.trainable_weights]
        gradients = [v.value.grad for v in trainable_weights]

        # Update weights
        with torch.no_grad():
            self.optimizer.apply(gradients, trainable_weights)

        # Update metrics (includes the metric that tracks the loss)
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(loss)
            else:
                metric.update_state(y, y_pred, sample_weight=sample_weight)

        # Return a dict mapping metric names to current value
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}


# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])

# You can now use sample_weight argument
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
sw = np.random.random((1000, 1))
model.fit(x, y, sample_weight=sw, epochs=3)
"""
## Providing your own evaluation step
What if you want to do the same for calls to `model.evaluate()`? Then you would
override `test_step` in exactly the same way. Here's what it looks like:
"""
class CustomModel(keras.Model):
    """Keras model with a custom evaluation step (`test_step`)."""

    def test_step(self, data):
        """Evaluate one batch: forward pass, loss, and metric updates."""
        # Unpack the data
        x, y = data
        # Compute predictions
        y_pred = self(x, training=False)
        # Updates the metrics tracking the loss
        loss = self.compute_loss(y=y, y_pred=y_pred)
        # Update the metrics.
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(loss)
            else:
                metric.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}


# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])

# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)
"""
## Wrapping up: an end-to-end GAN example
Let's walk through an end-to-end example that leverages everything you just learned.
Let's consider:
- A generator network meant to generate 28x28x1 images.
- A discriminator network meant to classify 28x28x1 images into two classes ("fake" and
"real").
- One optimizer for each.
- A loss function to train the discriminator.
"""
# Create the discriminator: maps a 28x28x1 image to a single real/fake logit.
discriminator = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(negative_slope=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(negative_slope=0.2),
        layers.GlobalMaxPooling2D(),
        layers.Dense(1),
    ],
    name="discriminator",
)

# Create the generator: maps a latent vector to a 28x28x1 image in [0, 1].
latent_dim = 128
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        layers.Dense(7 * 7 * 128),
        layers.LeakyReLU(negative_slope=0.2),
        layers.Reshape((7, 7, 128)),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(negative_slope=0.2),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(negative_slope=0.2),
        layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
    ],
    name="generator",
)
"""
Here's a feature-complete GAN class, overriding `compile()` to use its own signature,
and implementing the entire GAN algorithm in 17 lines in `train_step`:
"""
class GAN(keras.Model):
    """Minimal DCGAN: overrides `compile()` to take one optimizer per
    sub-network, and implements the full adversarial update in `train_step`
    using PyTorch autograd."""

    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim
        self.d_loss_tracker = keras.metrics.Mean(name="d_loss")
        self.g_loss_tracker = keras.metrics.Mean(name="g_loss")
        self.seed_generator = keras.random.SeedGenerator(1337)
        self.built = True

    @property
    def metrics(self):
        # Listed so the framework resets them at each epoch / evaluate() call.
        return [self.d_loss_tracker, self.g_loss_tracker]

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        """Store one optimizer per sub-network plus the shared loss function."""
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, real_images):
        """One adversarial step: update discriminator, then generator.

        Returns a dict with the running means of both losses.
        """
        if isinstance(real_images, tuple):
            real_images = real_images[0]
        # Sample random points in the latent space
        batch_size = real_images.shape[0]
        random_latent_vectors = keras.random.normal(
            shape=(batch_size, self.latent_dim), seed=self.seed_generator
        )

        # Decode them to fake images
        generated_images = self.generator(random_latent_vectors)

        # Combine them with real images
        real_images = torch.tensor(real_images)
        combined_images = torch.concat([generated_images, real_images], axis=0)

        # Assemble labels discriminating real from fake images
        # (1 for generated, 0 for real — matching the concat order above).
        labels = torch.concat(
            [torch.ones((batch_size, 1)), torch.zeros((batch_size, 1))], axis=0
        )
        # Add random noise to the labels - important trick!
        labels += 0.05 * keras.random.uniform(
            labels.shape, seed=self.seed_generator
        )

        # Train the discriminator
        self.zero_grad()
        predictions = self.discriminator(combined_images)
        d_loss = self.loss_fn(labels, predictions)
        d_loss.backward()
        grads = [v.value.grad for v in self.discriminator.trainable_weights]
        with torch.no_grad():
            self.d_optimizer.apply(grads, self.discriminator.trainable_weights)

        # Sample random points in the latent space
        random_latent_vectors = keras.random.normal(
            shape=(batch_size, self.latent_dim), seed=self.seed_generator
        )

        # Assemble labels that say "all real images"
        misleading_labels = torch.zeros((batch_size, 1))

        # Train the generator (note that we should *not* update the weights
        # of the discriminator)!
        self.zero_grad()
        predictions = self.discriminator(self.generator(random_latent_vectors))
        g_loss = self.loss_fn(misleading_labels, predictions)
        # Fix: `backward()` returns None, so the previous
        # `grads = g_loss.backward()` assigned None and was immediately
        # overwritten. Call it for its side effect only, mirroring the
        # discriminator phase above.
        g_loss.backward()
        grads = [v.value.grad for v in self.generator.trainable_weights]
        with torch.no_grad():
            self.g_optimizer.apply(grads, self.generator.trainable_weights)

        # Update metrics and return their value.
        self.d_loss_tracker.update_state(d_loss)
        self.g_loss_tracker.update_state(g_loss)
        return {
            "d_loss": self.d_loss_tracker.result(),
            "g_loss": self.g_loss_tracker.result(),
        }
"""
Let's test-drive it:
"""
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))

# Create a TensorDataset (inputs == targets; the labels are unused by the GAN)
dataset = torch.utils.data.TensorDataset(
    torch.from_numpy(all_digits), torch.from_numpy(all_digits)
)
# Create a DataLoader
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=batch_size, shuffle=True
)

gan = GAN(
    discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
gan.fit(dataloader, epochs=1)
"""
The ideas behind deep learning are simple, so why should their implementation be painful?
"""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/writing_a_custom_training_loop_in_tensorflow.py | guides/writing_a_custom_training_loop_in_tensorflow.py | """
Title: Writing a training loop from scratch in TensorFlow
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2023/06/25
Description: Writing low-level training & evaluation loops in TensorFlow.
Accelerator: None
"""
"""
## Setup
"""
import time
import os

# This guide can only be run with the TensorFlow backend. The environment
# variable must be set *before* `keras` is imported.
os.environ["KERAS_BACKEND"] = "tensorflow"

import tensorflow as tf
import keras
import numpy as np
"""
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](https://keras.io/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`.
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
"""
"""
## A first end-to-end example
Let's consider a simple MNIST model:
"""
def get_model():
    """Build a simple MNIST classifier: two 64-unit ReLU hidden layers
    followed by a 10-way logits output."""
    inputs = keras.Input(shape=(784,), name="digits")
    hidden = inputs
    for _ in range(2):
        hidden = keras.layers.Dense(64, activation="relu")(hidden)
    outputs = keras.layers.Dense(10, name="predictions")(hidden)
    return keras.Model(inputs=inputs, outputs=outputs)


model = get_model()
"""
Let's train it using mini-batch gradient with a custom training loop.
First, we're going to need an optimizer, a loss function, and a dataset:
"""
# Instantiate an optimizer.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Prepare the training dataset: flatten the 28x28 images into 784-vectors.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))

# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
"""
Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of
the trainable weights of the layer with respect to a loss value. Using an optimizer
instance, you can use these gradients to update these variables (which you can
retrieve using `model.trainable_weights`).
Here's our training loop, step by step:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we open a `GradientTape()` scope
- Inside this scope, we call the model (forward pass) and compute the loss
- Outside the scope, we retrieve the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
"""
# Basic custom training loop: for each batch, record the forward pass on a
# GradientTape, retrieve gradients, and apply them with the optimizer.
epochs = 3
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:
            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            logits = model(
                x_batch_train, training=True
            )  # Logits for this minibatch

            # Compute the loss value for this minibatch.
            loss_value = loss_fn(y_batch_train, logits)

        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.trainable_weights)

        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer.apply(grads, model.trainable_weights)

        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")
"""
## Low-level handling of metrics
Let's add metrics monitoring to this basic loop.
You can readily reuse the built-in metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_state()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `SparseCategoricalAccuracy` on training and
validation data at the end of each epoch:
"""
# Get a fresh model
model = get_model()

# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Prepare the metrics: one accuracy tracker each for training and validation.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
"""
Here's our training & evaluation loop:
"""
# Training loop with metric tracking: update after each batch, report and
# reset at the end of each epoch, then run a validation pass.
epochs = 2
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")
    start_time = time.time()

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply(grads, model.trainable_weights)

        # Update training metric.
        train_acc_metric.update_state(y_batch_train, logits)

        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print(f"Training acc over epoch: {float(train_acc):.4f}")

    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_state()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric.update_state(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_state()
    print(f"Validation acc: {float(val_acc):.4f}")
    print(f"Time taken: {time.time() - start_time:.2f}s")
"""
## Speeding-up your training step with `tf.function`
The default runtime in TensorFlow is eager execution.
As such, our training loop above executes eagerly.
This is great for debugging, but graph compilation has a definite performance
advantage. Describing your computation as a static graph enables the framework
to apply global performance optimizations. This is impossible when
the framework is constrained to greedily execute one operation after another,
with no knowledge of what comes next.
You can compile into a static graph any function that takes tensors as input.
Just add a `@tf.function` decorator on it, like this:
"""
@tf.function
def train_step(x, y):
    """One graph-compiled training step: forward, backward, metric update.

    Returns the scalar loss for this batch.
    """
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss_value = loss_fn(y, logits)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply(grads, model.trainable_weights)
    train_acc_metric.update_state(y, logits)
    return loss_value
"""
Let's do the same with the evaluation step:
"""
@tf.function
def test_step(x, y):
    """One graph-compiled evaluation step: forward pass plus metric update."""
    val_logits = model(x, training=False)
    val_acc_metric.update_state(y, val_logits)
"""
Now, let's re-run our training loop with this compiled training step:
"""
# Same training loop as before, but driving the @tf.function-compiled steps.
epochs = 2
for epoch in range(epochs):
    print(f"\nStart of epoch {epoch}")
    start_time = time.time()

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        loss_value = train_step(x_batch_train, y_batch_train)

        # Log every 100 batches.
        if step % 100 == 0:
            print(
                f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
            )
            print(f"Seen so far: {(step + 1) * batch_size} samples")

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print(f"Training acc over epoch: {float(train_acc):.4f}")

    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_state()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        test_step(x_batch_val, y_batch_val)

    val_acc = val_acc_metric.result()
    val_acc_metric.reset_state()
    print(f"Validation acc: {float(val_acc):.4f}")
    print(f"Time taken: {time.time() - start_time:.2f}s")
"""
Much faster, isn't it?
"""
"""
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values are available via the property `model.losses`
at the end of the forward pass.
If you want to be using these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, that creates an activity regularization loss:
"""
class ActivityRegularizationLayer(keras.layers.Layer):
    """Identity layer that adds an activity penalty via `add_loss`."""

    def call(self, inputs):
        # The added scalar ends up in `model.losses` after the forward pass.
        self.add_loss(1e-2 * tf.reduce_sum(inputs))
        return inputs
"""
Let's build a really simple model that uses it:
"""
# Functional model with an activity-regularization layer in the middle.
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what our training step should look like now:
"""
@tf.function
def train_step(x, y):
    """Training step that also folds layer-added losses into the total loss."""
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss_value = loss_fn(y, logits)
        # Add any extra losses created during the forward pass.
        loss_value += sum(model.losses)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply(grads, model.trainable_weights)
    train_acc_metric.update_state(y, logits)
    return loss_value
"""
## Summary
Now you know everything there is to know about using built-in training loops and
writing your own from scratch.
To conclude, here's a simple end-to-end example that ties together everything
you've learned in this guide: a DCGAN trained on MNIST digits.
"""
"""
## End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new
images that look almost real, by learning the latent distribution of a training
dataset of images (the "latent space" of the images).
A GAN is made of two parts: a "generator" model that maps points in the latent
space to points in image space, a "discriminator" model, a classifier
that can tell the difference between real images (from the training dataset)
and fake images (the output of the generator network).
A GAN training loop looks like this:
1) Train the discriminator.
- Sample a batch of random points in the latent space.
- Turn the points into fake images via the "generator" model.
- Get a batch of real images and combine them with the generated images.
- Train the "discriminator" model to classify generated vs. real images.
2) Train the generator.
- Sample random points in the latent space.
- Turn the points into fake images via the "generator" network.
- Get a batch of real images and combine them with the generated images.
- Train the "generator" model to "fool" the discriminator and classify the fake images
as real.
For a much more detailed overview of how GANs works, see
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
Let's implement this training loop. First, create the discriminator meant to classify
fake vs real digits:
"""
# Discriminator: maps a 28x28x1 image to a single real/fake logit.
discriminator = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
        keras.layers.LeakyReLU(negative_slope=0.2),
        keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        keras.layers.LeakyReLU(negative_slope=0.2),
        keras.layers.GlobalMaxPooling2D(),
        keras.layers.Dense(1),
    ],
    name="discriminator",
)
discriminator.summary()
"""
Then let's create a generator network,
that turns latent vectors into outputs of shape `(28, 28, 1)` (representing
MNIST digits):
"""
# Generator: maps a latent vector to a 28x28x1 image with values in [0, 1].
latent_dim = 128
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        keras.layers.Dense(7 * 7 * 128),
        keras.layers.LeakyReLU(negative_slope=0.2),
        keras.layers.Reshape((7, 7, 128)),
        keras.layers.Conv2DTranspose(
            128, (4, 4), strides=(2, 2), padding="same"
        ),
        keras.layers.LeakyReLU(negative_slope=0.2),
        keras.layers.Conv2DTranspose(
            128, (4, 4), strides=(2, 2), padding="same"
        ),
        keras.layers.LeakyReLU(negative_slope=0.2),
        keras.layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
    ],
    name="generator",
)
"""
Here's the key bit: the training loop. As you can see it is quite straightforward. The
training step function only takes 17 lines.
"""
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)

# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)


@tf.function
def train_step(real_images):
    """One GAN step: update the discriminator, then the generator.

    Returns (d_loss, g_loss, generated_images) for logging.
    """
    # Sample random points in the latent space
    random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
    # Decode them to fake images
    generated_images = generator(random_latent_vectors)
    # Combine them with real images
    combined_images = tf.concat([generated_images, real_images], axis=0)

    # Assemble labels discriminating real from fake images.
    # The generated half always has `batch_size` samples, but the final real
    # batch of an epoch may be smaller — hence `real_images.shape[0]`.
    labels = tf.concat(
        [tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
    )
    # Add random noise to the labels - important trick!
    labels += 0.05 * tf.random.uniform(labels.shape)

    # Train the discriminator
    with tf.GradientTape() as tape:
        predictions = discriminator(combined_images)
        d_loss = loss_fn(labels, predictions)
    grads = tape.gradient(d_loss, discriminator.trainable_weights)
    d_optimizer.apply(grads, discriminator.trainable_weights)

    # Sample random points in the latent space
    random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
    # Assemble labels that say "all real images"
    misleading_labels = tf.zeros((batch_size, 1))

    # Train the generator (note that we should *not* update the weights
    # of the discriminator)!
    with tf.GradientTape() as tape:
        predictions = discriminator(generator(random_latent_vectors))
        g_loss = loss_fn(misleading_labels, predictions)
    grads = tape.gradient(g_loss, generator.trainable_weights)
    g_optimizer.apply(grads, generator.trainable_weights)
    return d_loss, g_loss, generated_images
"""
Let's train our GAN, by repeatedly calling `train_step` on batches of images.
Since our discriminator and generator are convnets, you're going to want to
run this code on a GPU.
"""
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)

epochs = 1  # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"

for epoch in range(epochs):
    print(f"\nStart epoch {epoch}")

    for step, real_images in enumerate(dataset):
        # Train the discriminator & generator on one batch of real images.
        d_loss, g_loss, generated_images = train_step(real_images)

        # Logging.
        if step % 100 == 0:
            # Print metrics
            print(f"discriminator loss at step {step}: {d_loss:.2f}")
            print(f"adversarial loss at step {step}: {g_loss:.2f}")

            # Save one generated image
            img = keras.utils.array_to_img(
                generated_images[0] * 255.0, scale=False
            )
            img.save(os.path.join(save_dir, f"generated_img_{step}.png"))

        # To limit execution time we stop after 10 steps.
        # Remove the lines below to actually train the model!
        if step > 10:
            break
"""
That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the
Colab GPU.
"""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/understanding_masking_and_padding.py | guides/understanding_masking_and_padding.py | """
Title: Understanding masking & padding
Authors: Scott Zhu, Francois Chollet
Date created: 2019/07/16
Last modified: 2023/06/25
Description: Complete guide to using mask-aware sequence layers in Keras.
Accelerator: None
"""
"""
## Setup
"""
import numpy as np
import keras
from keras import ops
from keras import layers
"""
## Introduction
**Masking** is a way to tell sequence-processing layers that certain timesteps
in an input are missing, and thus should be skipped when processing the data.
**Padding** is a special form of masking where the masked steps are at the start or
the end of a sequence. Padding comes from the need to encode sequence data into
contiguous batches: in order to make all sequences in a batch fit a given standard
length, it is necessary to pad or truncate some sequences.
Let's take a close look.
"""
"""
## Padding sequence data
When processing sequence data, it is very common for individual samples to have
different lengths. Consider the following example (text tokenized as words):
```
[
["Hello", "world", "!"],
["How", "are", "you", "doing", "today"],
["The", "weather", "will", "be", "nice", "tomorrow"],
]
```
After vocabulary lookup, the data might be vectorized as integers, e.g.:
```
[
[71, 1331, 4231]
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
```
The data is a nested list where individual samples have length 3, 5, and 6,
respectively. Since the input data for a deep learning model must be a single tensor
(of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter
than the longest item need to be padded with some placeholder value (alternatively,
one might also truncate long samples before padding short samples).
Keras provides a utility function to truncate and pad Python lists to a common length:
`keras.utils.pad_sequences`.
"""
# Ragged integer sequences of length 3, 5 and 6.
raw_inputs = [
    [711, 632, 71],
    [73, 8, 3215, 55, 927],
    [83, 91, 1, 645, 1253, 927],
]

# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could use "pre" padding (at the beginning) or
# "post" padding (at the end).
# We recommend using "post" padding when working with RNN layers
# (in order to be able to use the
# CuDNN implementation of the layers).
padded_inputs = keras.utils.pad_sequences(raw_inputs, padding="post")
print(padded_inputs)
"""
## Masking
Now that all samples have a uniform length, the model must be informed that some part
of the data is actually padding and should be ignored. That mechanism is **masking**.
There are three ways to introduce input masks in Keras models:
- Add a `keras.layers.Masking` layer.
- Configure a `keras.layers.Embedding` layer with `mask_zero=True`.
- Pass a `mask` argument manually when calling layers that support this argument (e.g.
RNN layers).
"""
"""
## Mask-generating layers: `Embedding` and `Masking`
Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch,
sequence_length)`), and attach it to the tensor output returned by the `Masking` or
`Embedding` layer.
"""
# An Embedding layer with mask_zero=True attaches a `_keras_mask` to its
# output; zero-valued timesteps are masked out.
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
masked_output = embedding(padded_inputs)
print(masked_output._keras_mask)

masking_layer = layers.Masking()
# Simulate the embedding lookup by expanding the 2D input to 3D,
# with embedding dimension of 10.
unmasked_embedding = ops.cast(
    ops.tile(ops.expand_dims(padded_inputs, axis=-1), [1, 1, 10]),
    dtype="float32",
)

masked_embedding = masking_layer(unmasked_embedding)
print(masked_embedding._keras_mask)
"""
As you can see from the printed result, the mask is a 2D boolean tensor with shape
`(batch_size, sequence_length)`, where each individual `False` entry indicates that
the corresponding timestep should be ignored during processing.
"""
"""
## Mask propagation in the Functional API and Sequential API
When using the Functional API or the Sequential API, a mask generated by an `Embedding`
or `Masking` layer will be propagated through the network for any layer that is
capable of using them (for example, RNN layers). Keras will automatically fetch the
mask corresponding to an input and pass it to any layer that knows how to use it.
For instance, in the following Sequential model, the `LSTM` layer will automatically
receive a mask, which means it will ignore padded values:
"""
# Sequential model: the Embedding mask is propagated to the LSTM automatically.
model = keras.Sequential(
    [
        layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True),
        layers.LSTM(32),
    ]
)
"""
This is also the case for the following Functional API model:
"""
# Functional API equivalent: same automatic mask propagation to the LSTM.
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)
model = keras.Model(inputs, outputs)
"""
## Passing mask tensors directly to layers
"""
"""
Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their
`__call__` method.
Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input,
previous_mask)` method which you can call.
Thus, you can pass the output of the `compute_mask()` method of a mask-producing layer
to the `__call__` method of a mask-consuming layer, like this:
"""
class MyLayer(layers.Layer):
    """Embedding + LSTM layer that passes the mask to the LSTM explicitly."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.embedding = layers.Embedding(
            input_dim=5000, output_dim=16, mask_zero=True
        )
        self.lstm = layers.LSTM(32)

    def call(self, inputs):
        x = self.embedding(inputs)
        # Note that you could also prepare a `mask` tensor manually.
        # It only needs to be a boolean tensor
        # with the right shape, i.e. (batch_size, timesteps).
        mask = self.embedding.compute_mask(inputs)
        output = self.lstm(
            x, mask=mask
        )  # The layer will ignore the masked values
        return output


layer = MyLayer()
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
"""
## Supporting masking in your custom layers
"""
"""
Sometimes, you may need to write layers that generate a mask (like `Embedding`), or
layers that need to modify the current mask.
For instance, any layer that produces a tensor with a different time dimension than its
input, such as a `Concatenate` layer that concatenates on the time dimension, will
need to modify the current mask so that downstream layers will be able to properly
take masked timesteps into account.
To do this, your layer should implement the `layer.compute_mask()` method, which
produces a new mask given the input and the current mask.
Here is an example of a `TemporalSplit` layer that needs to modify the current mask.
"""
class TemporalSplit(keras.layers.Layer):
    """Split the input tensor into 2 tensors along the time dimension."""

    def call(self, inputs):
        # Inputs are expected to be 3D; cut them in half along the time
        # axis (axis 1), producing two sub-tensors.
        return ops.split(inputs, 2, axis=1)

    def compute_mask(self, inputs, mask=None):
        # The 2D (batch, timesteps) mask, when one is present, must be
        # split the same way so each half lines up with its sub-tensor.
        return None if mask is None else ops.split(mask, 2, axis=1)
# Splitting the masked embedding also splits its mask, so each half still
# knows which of its own timesteps are padding.
first_half, second_half = TemporalSplit()(masked_embedding)
print(first_half._keras_mask)
print(second_half._keras_mask)
"""
Here is another example of a `CustomEmbedding` layer that is capable of generating a
mask from input values:
"""
class CustomEmbedding(keras.layers.Layer):
    """Embedding lookup that can emit a mask marking nonzero token ids."""

    def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.mask_zero = mask_zero

    def build(self, input_shape):
        # One trainable row per vocabulary entry.
        self.embeddings = self.add_weight(
            shape=(self.input_dim, self.output_dim),
            initializer="random_normal",
            dtype="float32",
        )

    def call(self, inputs):
        token_ids = ops.cast(inputs, "int32")
        return ops.take(self.embeddings, token_ids)

    def compute_mask(self, inputs, mask=None):
        # With mask_zero on, id 0 is treated as padding and masked out.
        return ops.not_equal(inputs, 0) if self.mask_zero else None
layer = CustomEmbedding(10, 32, mask_zero=True)
# Token ids in [0, 9); zeros are likely and will be masked.
x = np.random.random((3, 10)) * 9
x = x.astype("int32")
y = layer(x)
# Standalone use of the layer: compute the mask explicitly.
mask = layer.compute_mask(x)
print(mask)
"""
Note: For more details about format limitations related to masking, see the
[serialization guide](/guides/serialization_and_saving).
"""
"""
## Opting-in to mask propagation on compatible layers
Most layers don't modify the time dimension, so don't need to modify the current mask.
However, they may still want to be able to **propagate** the current mask, unchanged,
to the next layer. **This is an opt-in behavior.** By default, a custom layer will
destroy the current mask (since the framework has no way to tell whether propagating
the mask is safe to do).
If you have a custom layer that does not modify the time dimension, and if you want it
to be able to propagate the current input mask, you should set `self.supports_masking
= True` in the layer constructor. In this case, the default behavior of
`compute_mask()` is to just pass the current mask through.
Here's an example of a layer that is whitelisted for mask propagation:
"""
class MyActivation(keras.layers.Layer):
    """ReLU activation layer that forwards any incoming mask untouched."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Opt in to mask propagation: the default compute_mask() then just
        # passes the current mask through to the next layer.
        self.supports_masking = True

    def call(self, inputs):
        return ops.relu(inputs)
"""
You can now use this custom layer in-between a mask-generating layer (like `Embedding`)
and a mask-consuming layer (like `LSTM`), and it will pass the mask along so that it
reaches the mask-consuming layer.
"""
# MyActivation sits between the mask producer (Embedding) and the mask
# consumer (LSTM); because it opted in, the mask flows through it.
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
x = MyActivation()(x)  # Will pass the mask along
print("Mask found:", x._keras_mask)
outputs = layers.LSTM(32)(x)  # Will receive the mask
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 5000, size=(32, 100)))
"""
## Writing layers that need mask information
Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to
determine whether to skip certain time steps.
To write such a layer, you can simply add a `mask=None` argument in your `call`
signature. The mask associated with the inputs will be passed to your layer whenever
it is available.
Here's a simple example below: a layer that computes a softmax over the time dimension
(axis 1) of an input sequence, while discarding masked timesteps.
"""
class TemporalSoftmax(keras.layers.Layer):
    """Softmax over the time dimension (axis 1) that skips masked steps.

    Masked timesteps contribute nothing to the normalizer and produce 0 in
    the output, so the unmasked probabilities still sum to 1 along time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Propagate any incoming mask to downstream layers unchanged.
        self.supports_masking = True

    def call(self, inputs, mask=None):
        # This layer is a mask consumer: it cannot work without a mask.
        assert mask is not None
        # (batch, timesteps) -> (batch, timesteps, 1) so the mask
        # broadcasts against the (batch, timesteps, features) inputs.
        broadcast_float_mask = ops.expand_dims(ops.cast(mask, "float32"), -1)
        # Zero out masked timesteps before normalizing.
        inputs_exp = ops.exp(inputs) * broadcast_float_mask
        # Bug fix: normalize along the time axis (axis=1), not axis=-1.
        # With the single-feature input this guide feeds the layer, summing
        # over axis=-1 would just return 1.0 at every unmasked timestep.
        # inputs_exp is already masked, so no second multiply is needed.
        inputs_sum = ops.sum(inputs_exp, axis=1, keepdims=True)
        return inputs_exp / inputs_sum
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs)
# Project to a single feature per timestep before the temporal softmax.
x = layers.Dense(1)(x)
outputs = TemporalSoftmax()(x)
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 10, size=(32, 100)))
"""
## Summary
That is all you need to know about padding & masking in Keras. To recap:
- "Masking" is how layers are able to know when to skip / ignore certain timesteps in
sequence inputs.
- Some layers are mask-generators: `Embedding` can generate a mask from input values
(if `mask_zero=True`), and so can the `Masking` layer.
- Some layers are mask-consumers: they expose a `mask` argument in their `__call__`
method. This is the case for RNN layers.
- In the Functional API and Sequential API, mask information is propagated
automatically.
- When using layers in a standalone way, you can pass the `mask` arguments to layers
manually.
- You can easily write layers that modify the current mask, that generate a new mask,
or that consume the mask associated with the inputs.
"""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/guides/distributed_training_with_torch.py | guides/distributed_training_with_torch.py | """
Title: Multi-GPU distributed training with PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/29
Last modified: 2023/06/29
Description: Guide to multi-GPU training for Keras models with PyTorch.
Accelerator: GPU
"""
"""
## Introduction
There are generally two ways to distribute computation across multiple devices:
**Data parallelism**, where a single model gets replicated on multiple devices or
multiple machines. Each of them processes different batches of data, then they merge
their results. There exist many variants of this setup, that differ in how the different
model replicas merge results, in whether they stay in sync at every batch or whether they
are more loosely coupled, etc.
**Model parallelism**, where different parts of a single model run on different devices,
processing a single batch of data together. This works best with models that have a
naturally-parallel architecture, such as models that feature multiple branches.
This guide focuses on data parallelism, in particular **synchronous data parallelism**,
where the different replicas of the model stay in sync after each batch they process.
Synchronicity keeps the model convergence behavior identical to what you would see for
single-device training.
Specifically, this guide teaches you how to use PyTorch's `DistributedDataParallel`
module wrapper to train Keras, with minimal changes to your code,
on multiple GPUs (typically 2 to 16) installed on a single machine (single host,
multi-device training). This is the most common setup for researchers and small-scale
industry workflows.
"""
"""
## Setup
Let's start by defining the function that creates the model that we will train,
and the function that creates the dataset we will train on (MNIST in this case).
"""
import os

# The backend must be selected before keras is imported: the environment
# variable is read once at import time.
os.environ["KERAS_BACKEND"] = "torch"

import torch
import numpy as np
import keras
def get_model():
    """Build a small MNIST convnet: three Conv/BN/ReLU stages followed by
    global pooling and a dropout-regularized classifier head."""

    def conv_bn_relu(tensor, **conv_kwargs):
        # Conv -> BatchNorm -> ReLU. BatchNorm supplies the shift (center),
        # so convolutions that disable their bias lose nothing.
        tensor = keras.layers.Conv2D(**conv_kwargs)(tensor)
        tensor = keras.layers.BatchNormalization(scale=False, center=True)(
            tensor
        )
        return keras.layers.ReLU()(tensor)

    inputs = keras.Input(shape=(28, 28, 1))
    # Map raw [0, 255] pixel values into [0, 1].
    x = keras.layers.Rescaling(1.0 / 255.0)(inputs)
    x = conv_bn_relu(
        x, filters=12, kernel_size=3, padding="same", use_bias=False
    )
    x = conv_bn_relu(x, filters=24, kernel_size=6, use_bias=False, strides=2)
    x = conv_bn_relu(
        x, filters=32, kernel_size=6, padding="same", strides=2, name="large_k"
    )
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dense(256, activation="relu")(x)
    x = keras.layers.Dropout(0.5)(x)
    outputs = keras.layers.Dense(10)(x)
    return keras.Model(inputs, outputs)
def get_dataset():
    """Return the MNIST training split as a torch ``TensorDataset``."""
    # Download MNIST; the held-out test split is unused in this guide.
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    # Cast to float32 only; the actual [0, 1] scaling happens inside the
    # model's Rescaling layer.
    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    # Add a trailing channels axis: (N, 28, 28) -> (N, 28, 28, 1).
    x_train = np.expand_dims(x_train, -1)
    x_test = np.expand_dims(x_test, -1)
    print("x_train shape:", x_train.shape)
    # NOTE(review): labels keep their original integer dtype here; torch's
    # CrossEntropyLoss expects int64 class targets — confirm on real runs.
    return torch.utils.data.TensorDataset(
        torch.from_numpy(x_train), torch.from_numpy(y_train)
    )
"""
Next, let's define a simple PyTorch training loop that targets
a GPU (note the calls to `.cuda()`).
"""
def train_model(model, dataloader, num_epochs, optimizer, loss_fn):
    """Plain PyTorch training loop; prints the mean loss once per epoch.

    Inputs and targets are moved to the current CUDA device, so this must
    run in a process that has already selected its GPU.
    """
    for epoch in range(num_epochs):
        total_loss = 0.0
        num_batches = 0
        for inputs, targets in dataloader:
            inputs = inputs.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)
            # Forward pass.
            loss = loss_fn(model(inputs), targets)
            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            num_batches += 1
        # Report the average loss over the epoch.
        print(
            f"Epoch {epoch + 1}/{num_epochs}, "
            f"Loss: {total_loss / num_batches}"
        )
"""
## Single-host, multi-device synchronous training
In this setup, you have one machine with several GPUs on it (typically 2 to 16). Each
device will run a copy of your model (called a **replica**). For simplicity, in what
follows, we'll assume we're dealing with 8 GPUs, at no loss of generality.
**How it works**
At each step of training:
- The current batch of data (called **global batch**) is split into 8 different
sub-batches (called **local batches**). For instance, if the global batch has 512
samples, each of the 8 local batches will have 64 samples.
- Each of the 8 replicas independently processes a local batch: they run a forward pass,
then a backward pass, outputting the gradient of the weights with respect to the loss of
the model on the local batch.
- The weight updates originating from local gradients are efficiently merged across the 8
replicas. Because this is done at the end of every step, the replicas always stay in
sync.
In practice, the process of synchronously updating the weights of the model replicas is
handled at the level of each individual weight variable: the local gradients are averaged
(all-reduced) across the replicas before each optimizer step.
**How to use it**
To do single-host, multi-device synchronous training with a Keras model, you would use
the `torch.nn.parallel.DistributedDataParallel` module wrapper.
Here's how it works:
- We use `torch.multiprocessing.start_processes` to start multiple Python processes, one
per device. Each process will run the `per_device_launch_fn` function.
- The `per_device_launch_fn` function does the following:
- It uses `torch.distributed.init_process_group` and `torch.cuda.set_device`
to configure the device to be used for that process.
- It uses `torch.utils.data.distributed.DistributedSampler`
and `torch.utils.data.DataLoader` to turn our data into a distributed data loader.
- It also uses `torch.nn.parallel.DistributedDataParallel` to turn our model into
a distributed PyTorch module.
- It then calls the `train_model` function.
- The `train_model` function will then run in each process, with the model using
a separate device in each process.
Here's the flow, where each step is split into its own utility function:
"""
# Config
num_gpu = torch.cuda.device_count()
num_epochs = 2
batch_size = 64  # per-replica batch size (applied after the sampler shards)
print(f"Running on {num_gpu} GPUs")
def setup_device(current_gpu_index, num_gpus):
    """Join the NCCL process group and bind this process to its own GPU."""
    # Rendezvous endpoint used by init_method="env://"; every process must
    # agree on these values.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "56492"
    torch.distributed.init_process_group(
        backend="nccl",
        init_method="env://",
        world_size=num_gpus,
        rank=current_gpu_index,
    )
    # Make cuda:<rank> the default device for all subsequent CUDA calls in
    # this process.
    torch.cuda.set_device(torch.device(f"cuda:{current_gpu_index}"))
def cleanup():
    # Tear down the distributed process group once training has finished.
    torch.distributed.destroy_process_group()
def prepare_dataloader(dataset, current_gpu_index, num_gpus, batch_size):
    """Wrap ``dataset`` in a DataLoader that yields only this replica's shard.

    The DistributedSampler hands each of the ``num_gpus`` ranks a disjoint
    subset of indices, so together the replicas cover the whole dataset.
    """
    shard_sampler = torch.utils.data.distributed.DistributedSampler(
        dataset,
        num_replicas=num_gpus,
        rank=current_gpu_index,
        shuffle=False,
    )
    # shuffle stays False here: ordering is the sampler's job, and
    # DataLoader forbids combining a sampler with shuffle=True.
    return torch.utils.data.DataLoader(
        dataset,
        sampler=shard_sampler,
        batch_size=batch_size,
        shuffle=False,
    )
def per_device_launch_fn(current_gpu_index, num_gpu):
    """Entry point executed in each spawned process (one per GPU)."""
    # Join the process group and claim this process's device first.
    setup_device(current_gpu_index, num_gpu)

    dataset = get_dataset()
    model = get_model()

    # Shard the data so this replica only sees its own subset.
    dataloader = prepare_dataloader(
        dataset, current_gpu_index, num_gpu, batch_size
    )

    # Plain torch optimizer and loss applied to the Keras model's params.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    loss_fn = torch.nn.CrossEntropyLoss()

    # Move the model to this GPU, then wrap it so gradients are synced
    # across replicas after every backward pass.
    model = model.to(current_gpu_index)
    ddp_model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[current_gpu_index], output_device=current_gpu_index
    )

    train_model(ddp_model, dataloader, num_epochs, optimizer, loss_fn)
    cleanup()
"""
Time to start multiple processes:
"""
if __name__ == "__main__":
    # We use the "fork" method rather than "spawn" to support notebooks
    # Launch one process per GPU; each process receives its rank as the
    # first argument of per_device_launch_fn.
    torch.multiprocessing.start_processes(
        per_device_launch_fn,
        args=(num_gpu,),
        nprocs=num_gpu,
        join=True,
        start_method="fork",
    )
"""
That's it!
"""
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.